diff --git a/.gitignore b/.gitignore index e69de29..13a46c8 100644 --- a/.gitignore +++ b/.gitignore @@ -0,0 +1,14 @@ +lapack-3.2.1.tgz +manpages.tgz +lapack-3.2.2.tgz +/lapack-3.3.1.tgz +/lapack-3.4.0.tgz +/lapack-3.4.1.tgz +/lapack-3.4.2.tgz +/lapack-3.4.2-clean.tgz +/lapack-3.5.0.tgz +/lapack-3.6.0.tgz +/lapack-3.6.1.tgz +/lapack-3.7.1.tgz +/lapack-3.8.0.tar.gz +/v3.9.0.tar.gz diff --git a/87536aa3c8bb0af00f66088fb6ac05d87509e011.patch b/87536aa3c8bb0af00f66088fb6ac05d87509e011.patch new file mode 100644 index 0000000..9c0fb4e --- /dev/null +++ b/87536aa3c8bb0af00f66088fb6ac05d87509e011.patch @@ -0,0 +1,138 @@ +From 87536aa3c8bb0af00f66088fb6ac05d87509e011 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?S=C3=A9bastien=20Villemot?= +Date: Sat, 23 Nov 2019 12:22:20 +0100 +Subject: [PATCH] Restore missing prototypes for deprecated LAPACK functions + +Some LAPACK functions prototypes were inadvertedly dropped in 3.9.0. As a +consequence, LAPACKE has several unresolved symbols. + +Closes #365 +--- + LAPACKE/include/lapack.h | 100 +++++++++++++++++++++++++++++++++++++++ + 1 file changed, 100 insertions(+) + +diff --git a/LAPACKE/include/lapack.h b/LAPACKE/include/lapack.h +index 3f425325f..5c131d844 100644 +--- a/LAPACKE/include/lapack.h ++++ b/LAPACKE/include/lapack.h +@@ -1842,6 +1842,28 @@ void LAPACK_zgeqlf( + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + ++#define LAPACK_sgeqpf LAPACK_GLOBAL(sgeqpf,SGEQPF) ++void LAPACK_sgeqpf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda, ++ lapack_int* jpvt, float* tau, float* work, ++ lapack_int *info ); ++ ++#define LAPACK_dgeqpf LAPACK_GLOBAL(dgeqpf,DGEQPF) ++void LAPACK_dgeqpf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda, ++ lapack_int* jpvt, double* tau, double* work, ++ lapack_int *info ); ++ ++#define LAPACK_cgeqpf LAPACK_GLOBAL(cgeqpf,CGEQPF) ++void LAPACK_cgeqpf( lapack_int* m, lapack_int* n, lapack_complex_float* a, ++ lapack_int* lda, lapack_int* jpvt, ++ 
lapack_complex_float* tau, lapack_complex_float* work, ++ float* rwork, lapack_int *info ); ++ ++#define LAPACK_zgeqpf LAPACK_GLOBAL(zgeqpf,ZGEQPF) ++void LAPACK_zgeqpf( lapack_int* m, lapack_int* n, lapack_complex_double* a, ++ lapack_int* lda, lapack_int* jpvt, ++ lapack_complex_double* tau, lapack_complex_double* work, ++ double* rwork, lapack_int *info ); ++ + #define LAPACK_cgeqp3 LAPACK_GLOBAL(cgeqp3,CGEQP3) + void LAPACK_cgeqp3( + lapack_int const* m, lapack_int const* n, +@@ -3617,6 +3639,47 @@ void LAPACK_zggrqf( + lapack_complex_double* work, lapack_int const* lwork, + lapack_int* info ); + ++#define LAPACK_sggsvd LAPACK_GLOBAL(sggsvd,SGGSVD) ++lapack_int LAPACKE_sggsvd( int matrix_layout, char jobu, char jobv, char jobq, ++ lapack_int m, lapack_int n, lapack_int p, ++ lapack_int* k, lapack_int* l, float* a, ++ lapack_int lda, float* b, lapack_int ldb, ++ float* alpha, float* beta, float* u, lapack_int ldu, ++ float* v, lapack_int ldv, float* q, lapack_int ldq, ++ lapack_int* iwork ); ++ ++#define LAPACK_dggsvd LAPACK_GLOBAL(dggsvd,DGGSVD) ++lapack_int LAPACKE_dggsvd( int matrix_layout, char jobu, char jobv, char jobq, ++ lapack_int m, lapack_int n, lapack_int p, ++ lapack_int* k, lapack_int* l, double* a, ++ lapack_int lda, double* b, lapack_int ldb, ++ double* alpha, double* beta, double* u, ++ lapack_int ldu, double* v, lapack_int ldv, double* q, ++ lapack_int ldq, lapack_int* iwork ); ++ ++#define LAPACK_cggsvd LAPACK_GLOBAL(cggsvd,CGGSVD) ++lapack_int LAPACKE_cggsvd( int matrix_layout, char jobu, char jobv, char jobq, ++ lapack_int m, lapack_int n, lapack_int p, ++ lapack_int* k, lapack_int* l, ++ lapack_complex_float* a, lapack_int lda, ++ lapack_complex_float* b, lapack_int ldb, ++ float* alpha, float* beta, lapack_complex_float* u, ++ lapack_int ldu, lapack_complex_float* v, ++ lapack_int ldv, lapack_complex_float* q, ++ lapack_int ldq, lapack_int* iwork ); ++ ++#define LAPACK_zggsvd LAPACK_GLOBAL(zggsvd,ZGGSVD) ++lapack_int LAPACKE_zggsvd( int 
matrix_layout, char jobu, char jobv, char jobq, ++ lapack_int m, lapack_int n, lapack_int p, ++ lapack_int* k, lapack_int* l, ++ lapack_complex_double* a, lapack_int lda, ++ lapack_complex_double* b, lapack_int ldb, ++ double* alpha, double* beta, ++ lapack_complex_double* u, lapack_int ldu, ++ lapack_complex_double* v, lapack_int ldv, ++ lapack_complex_double* q, lapack_int ldq, ++ lapack_int* iwork ); ++ + #define LAPACK_cggsvd3 LAPACK_GLOBAL(cggsvd3,CGGSVD3) + void LAPACK_cggsvd3( + char const* jobu, char const* jobv, char const* jobq, +@@ -3679,6 +3742,43 @@ void LAPACK_zggsvd3( + lapack_int* iwork, + lapack_int* info ); + ++#define LAPACK_sggsvp LAPACK_GLOBAL(sggsvp,SGGSVP) ++lapack_int LAPACKE_sggsvp( int matrix_layout, char jobu, char jobv, char jobq, ++ lapack_int m, lapack_int p, lapack_int n, float* a, ++ lapack_int lda, float* b, lapack_int ldb, float tola, ++ float tolb, lapack_int* k, lapack_int* l, float* u, ++ lapack_int ldu, float* v, lapack_int ldv, float* q, ++ lapack_int ldq ); ++ ++#define LAPACK_dggsvp LAPACK_GLOBAL(dggsvp,DGGSVP) ++lapack_int LAPACKE_dggsvp( int matrix_layout, char jobu, char jobv, char jobq, ++ lapack_int m, lapack_int p, lapack_int n, double* a, ++ lapack_int lda, double* b, lapack_int ldb, ++ double tola, double tolb, lapack_int* k, ++ lapack_int* l, double* u, lapack_int ldu, double* v, ++ lapack_int ldv, double* q, lapack_int ldq ); ++ ++#define LAPACK_cggsvp LAPACK_GLOBAL(cggsvp,CGGSVP) ++lapack_int LAPACKE_cggsvp( int matrix_layout, char jobu, char jobv, char jobq, ++ lapack_int m, lapack_int p, lapack_int n, ++ lapack_complex_float* a, lapack_int lda, ++ lapack_complex_float* b, lapack_int ldb, float tola, ++ float tolb, lapack_int* k, lapack_int* l, ++ lapack_complex_float* u, lapack_int ldu, ++ lapack_complex_float* v, lapack_int ldv, ++ lapack_complex_float* q, lapack_int ldq ); ++ ++#define LAPACK_zggsvp LAPACK_GLOBAL(zggsvp,ZGGSVP) ++lapack_int LAPACKE_zggsvp( int matrix_layout, char jobu, char jobv, char jobq, ++ 
lapack_int m, lapack_int p, lapack_int n, ++ lapack_complex_double* a, lapack_int lda, ++ lapack_complex_double* b, lapack_int ldb, ++ double tola, double tolb, lapack_int* k, ++ lapack_int* l, lapack_complex_double* u, ++ lapack_int ldu, lapack_complex_double* v, ++ lapack_int ldv, lapack_complex_double* q, ++ lapack_int ldq ); ++ + #define LAPACK_cggsvp3 LAPACK_GLOBAL(cggsvp3,CGGSVP3) + void LAPACK_cggsvp3( + char const* jobu, char const* jobv, char const* jobq, diff --git a/Makefile.blas b/Makefile.blas new file mode 100644 index 0000000..5cfb092 --- /dev/null +++ b/Makefile.blas @@ -0,0 +1,84 @@ +OBJCOPY = objcopy + +ifndef SYMBOLSUFFIX +SYMBOLSUFFIX = +endif + +clean: + rm -f *.o libblas.a libblas.so* + +SBLAS1= isamax.o sasum.o saxpy.o scopy.o sdot.o snrm2.o \ + srot.o srotg.o sscal.o sswap.o sdsdot.o srotmg.o srotm.o + +CBLAS1= scabs1.o scasum.o scnrm2.o icamax.o caxpy.o ccopy.o \ + cdotc.o cdotu.o csscal.o crotg.o cscal.o cswap.o csrot.o + +DBLAS1= idamax.o dasum.o daxpy.o dcopy.o ddot.o dnrm2.o \ + drot.o drotg.o dscal.o dsdot.o dswap.o drotmg.o drotm.o + +ZBLAS1= dcabs1.o dzasum.o dznrm2.o izamax.o zaxpy.o zcopy.o \ + zdotc.o zdotu.o zdscal.o zrotg.o zscal.o zswap.o zdrot.o + +CB1AUX= isamax.o sasum.o saxpy.o scopy.o snrm2.o sscal.o + +ZB1AUX= idamax.o dasum.o daxpy.o dcopy.o dnrm2.o dscal.o + +ALLBLAS=lsame.o xerbla.o xerbla_array.o + +SBLAS2= sgemv.o sgbmv.o ssymv.o ssbmv.o sspmv.o \ + strmv.o stbmv.o stpmv.o strsv.o stbsv.o stpsv.o \ + sger.o ssyr.o sspr.o ssyr2.o sspr2.o + +CBLAS2= cgemv.o cgbmv.o chemv.o chbmv.o chpmv.o \ + ctrmv.o ctbmv.o ctpmv.o ctrsv.o ctbsv.o ctpsv.o \ + cgerc.o cgeru.o cher.o chpr.o cher2.o chpr2.o + +DBLAS2= dgemv.o dgbmv.o dsymv.o dsbmv.o dspmv.o \ + dtrmv.o dtbmv.o dtpmv.o dtrsv.o dtbsv.o dtpsv.o \ + dger.o dsyr.o dspr.o dsyr2.o dspr2.o + +ZBLAS2= zgemv.o zgbmv.o zhemv.o zhbmv.o zhpmv.o \ + ztrmv.o ztbmv.o ztpmv.o ztrsv.o ztbsv.o ztpsv.o \ + zgerc.o zgeru.o zher.o zhpr.o zher2.o zhpr2.o \ + +SBLAS3= sgemm.o ssymm.o ssyrk.o 
ssyr2k.o strmm.o strsm.o + +CBLAS3= cgemm.o csymm.o csyrk.o csyr2k.o ctrmm.o ctrsm.o \ + chemm.o cherk.o cher2k.o + +DBLAS3= dgemm.o dsymm.o dsyrk.o dsyr2k.o dtrmm.o dtrsm.o + +ZBLAS3= zgemm.o zsymm.o zsyrk.o zsyr2k.o ztrmm.o ztrsm.o \ + zhemm.o zherk.o zher2k.o + +OBJS= $(SBLAS1) $(SBLAS2) $(SBLAS3) $(DBLAS1) $(DBLAS2) $(DBLAS3) \ + $(CBLAS1) $(CBLAS2) $(CBLAS3) $(ZBLAS1) $(ZBLAS2) $(ZBLAS3) \ + $(ALLBLAS) + +static: $(OBJS) +ifeq "$(SYMBOLSUFFIX)" "" + ar ruv libblas.a $(OBJS) + ranlib libblas.a +else + ar ruv libblas$(SYMBOLSUFFIX).a $(OBJS) + ranlib libblas$(SYMBOLSUFFIX).a + for i in `nm libblas$(SYMBOLSUFFIX).a |grep " T " | awk '{print $$3}'`; do echo "$$i" "64_$$i"; done > blas-static-prefix.def.dirty + sort -n blas-static-prefix.def.dirty | uniq > blas-static-prefix.def + $(OBJCOPY) --redefine-syms blas-static-prefix.def libblas$(SYMBOLSUFFIX).a libblas$(SYMBOLSUFFIX).a.fixed + rm -rf libblas$(SYMBOLSUFFIX).a + mv libblas$(SYMBOLSUFFIX).a.fixed libblas$(SYMBOLSUFFIX).a +endif + +shared: $(OBJS) +ifeq "$(SYMBOLSUFFIX)" "" + # No renaming needed + cc $(CFLAGS) -shared -Wl,-soname,libblas.so.@SHORTVER@ -o libblas.so.@LONGVER@ $(OBJS) $(LDFLAGS) -lm -lgfortran -lc +else + cc $(CFLAGS) -shared -Wl,-soname,libblas$(SYMBOLSUFFIX).so.@SHORTVER@ -o libblas$(SYMBOLSUFFIX).so.@LONGVER@ $(OBJS) $(LDFLAGS) -lm -lgfortran -lc + # generate a list of all symbols in shared library and rename with SYMBOLSUFFIX + for i in `readelf -Ws libblas$(SYMBOLSUFFIX).so.@LONGVER@ | awk '{print $$8}' | grep -v GLIBC |grep -v GFORTRAN |grep -v "Name" `; do echo "$$i" "64_$$i"; done > blas-prefix.def.dirty + sort -n blas-prefix.def.dirty | uniq > blas-prefix.def + $(OBJCOPY) --redefine-syms blas-prefix.def libblas$(SYMBOLSUFFIX).so.@LONGVER@ libblas$(SYMBOLSUFFIX).so.@LONGVER@.fixed + rm -rf libblas$(SYMBOLSUFFIX).so.@LONGVER@ + mv libblas$(SYMBOLSUFFIX).so.@LONGVER@.fixed libblas$(SYMBOLSUFFIX).so.@LONGVER@ +endif diff --git a/Makefile.cblas b/Makefile.cblas new file mode 100644 index 
0000000..cb98d2e --- /dev/null +++ b/Makefile.cblas @@ -0,0 +1,99 @@ +OBJCOPY = objcopy + +ifndef SYMBOLSUFFIX +SYMBOLSUFFIX = +endif + +clean: + rm -f *.o libcblas.a libcblas.so* + +ERRHAND= cblas_globals.o cblas_xerbla.o xerbla.o + +SLEV1= cblas_srotg.o cblas_srotmg.o cblas_srot.o cblas_srotm.o \ + cblas_sswap.o cblas_sscal.o cblas_scopy.o cblas_saxpy.o \ + cblas_sdot.o cblas_sdsdot.o cblas_snrm2.o cblas_sasum.o \ + cblas_isamax.o sdotsub.o sdsdotsub.o snrm2sub.o sasumsub.o \ + isamaxsub.o + +DLEV1= cblas_drotg.o cblas_drotmg.o cblas_drot.o cblas_drotm.o \ + cblas_dswap.o cblas_dscal.o cblas_dcopy.o cblas_daxpy.o \ + cblas_ddot.o cblas_dsdot.o cblas_dnrm2.o cblas_dasum.o \ + cblas_idamax.o ddotsub.o dsdotsub.o dnrm2sub.o \ + dasumsub.o idamaxsub.o + +CLEV1= cblas_cswap.o cblas_cscal.o cblas_csscal.o cblas_ccopy.o \ + cblas_caxpy.o cblas_cdotu_sub.o cblas_cdotc_sub.o \ + cblas_icamax.o cdotcsub.o cdotusub.o icamaxsub.o + +ZLEV1= cblas_zswap.o cblas_zscal.o cblas_zdscal.o cblas_zcopy.o \ + cblas_zaxpy.o cblas_zdotu_sub.o cblas_zdotc_sub.o cblas_dznrm2.o \ + cblas_dzasum.o cblas_izamax.o zdotcsub.o zdotusub.o \ + dzasumsub.o dznrm2sub.o izamaxsub.o + +SCLEV1= cblas_scasum.o scasumsub.o cblas_scnrm2.o scnrm2sub.o + +SLEV2= cblas_sgemv.o cblas_sgbmv.o cblas_sger.o cblas_ssbmv.o cblas_sspmv.o \ + cblas_sspr.o cblas_sspr2.o cblas_ssymv.o cblas_ssyr.o cblas_ssyr2.o \ + cblas_stbmv.o cblas_stbsv.o cblas_stpmv.o cblas_stpsv.o cblas_strmv.o \ + cblas_strsv.o + +DLEV2= cblas_dgemv.o cblas_dgbmv.o cblas_dger.o cblas_dsbmv.o cblas_dspmv.o \ + cblas_dspr.o cblas_dspr2.o cblas_dsymv.o cblas_dsyr.o cblas_dsyr2.o \ + cblas_dtbmv.o cblas_dtbsv.o cblas_dtpmv.o cblas_dtpsv.o cblas_dtrmv.o \ + cblas_dtrsv.o + +CLEV2= cblas_cgemv.o cblas_cgbmv.o cblas_chemv.o cblas_chbmv.o cblas_chpmv.o \ + cblas_ctrmv.o cblas_ctbmv.o cblas_ctpmv.o cblas_ctrsv.o cblas_ctbsv.o \ + cblas_ctpsv.o cblas_cgeru.o cblas_cgerc.o cblas_cher.o cblas_cher2.o \ + cblas_chpr.o cblas_chpr2.o + +ZLEV2= cblas_zgemv.o
cblas_zgbmv.o cblas_zhemv.o cblas_zhbmv.o cblas_zhpmv.o \ + cblas_ztrmv.o cblas_ztbmv.o cblas_ztpmv.o cblas_ztrsv.o cblas_ztbsv.o \ + cblas_ztpsv.o cblas_zgeru.o cblas_zgerc.o cblas_zher.o cblas_zher2.o \ + cblas_zhpr.o cblas_zhpr2.o + +SLEV3= cblas_sgemm.o cblas_ssymm.o cblas_ssyrk.o cblas_ssyr2k.o cblas_strmm.o \ + cblas_strsm.o + +DLEV3= cblas_dgemm.o cblas_dsymm.o cblas_dsyrk.o cblas_dsyr2k.o cblas_dtrmm.o \ + cblas_dtrsm.o + +CLEV3= cblas_cgemm.o cblas_csymm.o cblas_chemm.o cblas_cherk.o \ + cblas_cher2k.o cblas_ctrmm.o cblas_ctrsm.o cblas_csyrk.o \ + cblas_csyr2k.o + +ZLEV3= cblas_zgemm.o cblas_zsymm.o cblas_zhemm.o cblas_zherk.o \ + cblas_zher2k.o cblas_ztrmm.o cblas_ztrsm.o cblas_zsyrk.o \ + cblas_zsyr2k.o + +OBJS= $(SLEV1) $(DLEV1) $(CLEV1) $(ZLEV1) $(SCLEV1) $(SLEV2) $(DLEV2) $(CLEV2) \ + $(ZLEV2) $(SLEV3) $(DLEV3) $(CLEV3) $(ZLEV3) $(ERRHAND) + +static: $(OBJS) +ifeq "$(SYMBOLSUFFIX)" "" + ar ruv libcblas.a $(OBJS) + ranlib libcblas.a +else + ar ruv libcblas$(SYMBOLSUFFIX).a $(OBJS) + ranlib libcblas$(SYMBOLSUFFIX).a + for i in `nm libcblas$(SYMBOLSUFFIX).a |grep " T " | awk '{print $$3}'`; do echo "$$i" "64_$$i"; done > cblas-static-prefix.def.dirty + sort -n cblas-static-prefix.def.dirty | uniq > cblas-static-prefix.def + $(OBJCOPY) --redefine-syms cblas-static-prefix.def libcblas$(SYMBOLSUFFIX).a libcblas$(SYMBOLSUFFIX).a.fixed + rm -rf libcblas$(SYMBOLSUFFIX).a + mv libcblas$(SYMBOLSUFFIX).a.fixed libcblas$(SYMBOLSUFFIX).a +endif + +shared: $(OBJS) +ifeq "$(SYMBOLSUFFIX)" "" + # No renaming needed + cc $(CFLAGS) -shared -Wl,-soname,libcblas.so.@SHORTVER@ -o libcblas.so.@LONGVER@ $(OBJS) $(LDFLAGS) -L../../ -lblas -lm -lgfortran -lc +else + cc $(CFLAGS) -shared -Wl,-soname,libcblas$(SYMBOLSUFFIX).so.@SHORTVER@ -o libcblas$(SYMBOLSUFFIX).so.@LONGVER@ $(OBJS) $(LDFLAGS) -L../.. 
-lblas -lm -lgfortran -lc + # generate a list of all symbols in shared library and rename with SYMBOLSUFFIX + for i in `readelf -Ws libcblas$(SYMBOLSUFFIX).so.@LONGVER@ | awk '{print $$8}' | grep -v GLIBC |grep -v GFORTRAN |grep -v "Name" `; do echo "$$i" "64_$$i"; done > cblas-prefix.def.dirty + sort -n cblas-prefix.def.dirty | uniq > cblas-prefix.def + $(OBJCOPY) --redefine-syms cblas-prefix.def libcblas$(SYMBOLSUFFIX).so.@LONGVER@ libcblas$(SYMBOLSUFFIX).so.@LONGVER@.fixed + rm -rf libcblas$(SYMBOLSUFFIX).so.@LONGVER@ + mv libcblas$(SYMBOLSUFFIX).so.@LONGVER@.fixed libcblas$(SYMBOLSUFFIX).so.@LONGVER@ +endif + diff --git a/Makefile.lapack b/Makefile.lapack new file mode 100644 index 0000000..6b4328d --- /dev/null +++ b/Makefile.lapack @@ -0,0 +1,478 @@ +include ../make.inc + +OBJCOPY = objcopy + +ifndef SYMBOLSUFFIX +SYMBOLSUFFIX = +endif + +clean: + rm -f *.o liblapack* DEPRECATED/*.o + +SLASRC = sbdsvdx.o sgbbrd.o sgbcon.o sgbequ.o sgbrfs.o sgbsv.o \ + sgbsvx.o sgbtf2.o sgbtrf.o sgbtrs.o sgebak.o sgebal.o sgebd2.o \ + sgebrd.o sgecon.o sgeequ.o sgees.o sgeesx.o sgeev.o sgeevx.o \ + sgehd2.o sgehrd.o sgelq2.o sgelqf.o \ + sgels.o sgelsd.o sgelss.o sgelsy.o sgeql2.o sgeqlf.o \ + sgeqp3.o sgeqr2.o sgeqr2p.o sgeqrf.o sgeqrfp.o sgerfs.o sgerq2.o sgerqf.o \ + sgesc2.o sgesdd.o sgesv.o sgesvd.o sgesvdx.o sgesvx.o sgetc2.o sgetf2.o \ + sgetrf2.o sgetri.o \ + sggbak.o sggbal.o \ + sgges.o sgges3.o sggesx.o sggev.o sggev3.o sggevx.o \ + sggglm.o sgghrd.o sgghd3.o sgglse.o sggqrf.o \ + sggrqf.o sggsvd3.o sggsvp3.o sgtcon.o sgtrfs.o sgtsv.o \ + sgtsvx.o sgttrf.o sgttrs.o sgtts2.o shgeqz.o \ + shsein.o shseqr.o slabrd.o slacon.o slacn2.o \ + slaein.o slaexc.o slag2.o slags2.o slagtm.o slagv2.o slahqr.o \ + slahr2.o slaic1.o slaln2.o slals0.o slalsa.o slalsd.o \ + slangb.o slange.o slangt.o slanhs.o slansb.o slansp.o \ + slansy.o slantb.o slantp.o slantr.o slanv2.o \ + slapll.o slapmt.o \ + slaqgb.o slaqge.o slaqp2.o slaqps.o slaqsb.o slaqsp.o slaqsy.o \ + slaqr0.o slaqr1.o 
slaqr2.o slaqr3.o slaqr4.o slaqr5.o \ + slaqtr.o slar1v.o slar2v.o ilaslr.o ilaslc.o \ + slarf.o slarfb.o slarfg.o slarfgp.o slarft.o slarfx.o slarfy.o slargv.o \ + slarrv.o slartv.o \ + slarz.o slarzb.o slarzt.o slaswp.o slasy2.o \ + slasyf.o slasyf_rook.o slasyf_rk.o slasyf_aa.o \ + slatbs.o slatdf.o slatps.o slatrd.o slatrs.o slatrz.o \ + slauu2.o slauum.o sopgtr.o sopmtr.o sorg2l.o sorg2r.o \ + sorgbr.o sorghr.o sorgl2.o sorglq.o sorgql.o sorgqr.o sorgr2.o \ + sorgrq.o sorgtr.o sorgtsqr.o sorm2l.o sorm2r.o sorm22.o \ + sormbr.o sormhr.o sorml2.o sormlq.o sormql.o sormqr.o sormr2.o \ + sormr3.o sormrq.o sormrz.o sormtr.o spbcon.o spbequ.o spbrfs.o \ + spbstf.o spbsv.o spbsvx.o \ + spbtf2.o spbtrf.o spbtrs.o spocon.o spoequ.o sporfs.o sposv.o \ + sposvx.o spotf2.o spotrf2.o spotri.o spstrf.o spstf2.o \ + sppcon.o sppequ.o \ + spprfs.o sppsv.o sppsvx.o spptrf.o spptri.o spptrs.o sptcon.o \ + spteqr.o sptrfs.o sptsv.o sptsvx.o spttrs.o sptts2.o srscl.o \ + ssbev.o ssbevd.o ssbevx.o ssbgst.o ssbgv.o ssbgvd.o ssbgvx.o \ + ssbtrd.o sspcon.o sspev.o sspevd.o sspevx.o sspgst.o \ + sspgv.o sspgvd.o sspgvx.o ssprfs.o sspsv.o sspsvx.o ssptrd.o \ + ssptrf.o ssptri.o ssptrs.o sstegr.o sstein.o sstev.o sstevd.o sstevr.o \ + sstevx.o ssycon.o ssyev.o ssyevd.o ssyevr.o ssyevx.o ssygs2.o \ + ssygst.o ssygv.o ssygvd.o ssygvx.o ssyrfs.o ssysv.o ssysvx.o \ + ssytd2.o ssytf2.o ssytrd.o ssytrf.o ssytri.o ssytri2.o ssytri2x.o \ + ssyswapr.o ssytrs.o ssytrs2.o \ + ssyconv.o ssyconvf.o ssyconvf_rook.o \ + ssytf2_rook.o ssytrf_rook.o ssytrs_rook.o \ + ssytri_rook.o ssycon_rook.o ssysv_rook.o \ + ssytf2_rk.o ssytrf_rk.o ssytrs_3.o \ + ssytri_3.o ssytri_3x.o ssycon_3.o ssysv_rk.o \ + ssysv_aa.o ssytrf_aa.o ssytrs_aa.o \ + ssysv_aa_2stage.o ssytrf_aa_2stage.o ssytrs_aa_2stage.o \ + stbcon.o \ + stbrfs.o stbtrs.o stgevc.o stgex2.o stgexc.o stgsen.o \ + stgsja.o stgsna.o stgsy2.o stgsyl.o stpcon.o stprfs.o stptri.o \ + stptrs.o \ + strcon.o strevc.o strevc3.o strexc.o strrfs.o strsen.o 
strsna.o strsyl.o \ + strti2.o strtri.o strtrs.o stzrzf.o sstemr.o \ + slansf.o spftrf.o spftri.o spftrs.o ssfrk.o stfsm.o stftri.o stfttp.o \ + stfttr.o stpttf.o stpttr.o strttf.o strttp.o \ + sgejsv.o sgesvj.o sgsvj0.o sgsvj1.o \ + sgeequb.o ssyequb.o spoequb.o sgbequb.o \ + sbbcsd.o slapmr.o sorbdb.o sorbdb1.o sorbdb2.o sorbdb3.o sorbdb4.o \ + sorbdb5.o sorbdb6.o sorcsd.o sorcsd2by1.o \ + sgeqrt.o sgeqrt2.o sgeqrt3.o sgemqrt.o \ + stpqrt.o stpqrt2.o stpmqrt.o stprfb.o \ + sgelqt.o sgelqt3.o sgemlqt.o \ + sgetsls.o sgeqr.o slatsqr.o slamtsqr.o sgemqr.o \ + sgelq.o slaswlq.o slamswlq.o sgemlq.o \ + stplqt.o stplqt2.o stpmlqt.o \ + sorhr_col.o slaorhr_col_getrfnp.o slaorhr_col_getrfnp2.o \ + ssytrd_2stage.o ssytrd_sy2sb.o ssytrd_sb2st.o ssb2st_kernels.o \ + ssyevd_2stage.o ssyev_2stage.o ssyevx_2stage.o ssyevr_2stage.o \ + ssbev_2stage.o ssbevx_2stage.o ssbevd_2stage.o ssygv_2stage.o \ + sgesvdq.o scombssq.o \ + DEPRECATED/sgegs.o DEPRECATED/sgegv.o DEPRECATED/sgeqpf.o DEPRECATED/sgelsx.o \ + DEPRECATED/sggsvd.o DEPRECATED/sggsvp.o DEPRECATED/slahrd.o DEPRECATED/slatzm.o \ + DEPRECATED/stzrqf.o + +DSLASRC = spotrs.o sgetrs.o spotrf.o sgetrf.o + +CLASRC = cbdsqr.o cgbbrd.o cgbcon.o cgbequ.o cgbrfs.o cgbsv.o cgbsvx.o \ + cgbtf2.o cgbtrf.o cgbtrs.o cgebak.o cgebal.o cgebd2.o cgebrd.o \ + cgecon.o cgeequ.o cgees.o cgeesx.o cgeev.o cgeevx.o \ + cgehd2.o cgehrd.o cgelq2.o cgelqf.o \ + cgels.o cgelsd.o cgelss.o cgelsy.o cgeql2.o cgeqlf.o cgeqp3.o \ + cgeqr2.o cgeqr2p.o cgeqrf.o cgeqrfp.o cgerfs.o cgerq2.o cgerqf.o \ + cgesc2.o cgesdd.o cgesv.o cgesvd.o cgesvdx.o \ + cgesvj.o cgejsv.o cgsvj0.o cgsvj1.o \ + cgesvx.o cgetc2.o cgetf2.o cgetrf2.o \ + cgetri.o \ + cggbak.o cggbal.o \ + cgges.o cgges3.o cggesx.o cggev.o cggev3.o cggevx.o \ + cggglm.o cgghrd.o cgghd3.o cgglse.o cggqrf.o cggrqf.o \ + cggsvd3.o cggsvp3.o \ + cgtcon.o cgtrfs.o cgtsv.o cgtsvx.o cgttrf.o cgttrs.o cgtts2.o chbev.o \ + chbevd.o chbevx.o chbgst.o chbgv.o chbgvd.o chbgvx.o chbtrd.o \ + checon.o cheev.o 
cheevd.o cheevr.o cheevx.o chegs2.o chegst.o \ + chegv.o chegvd.o chegvx.o cherfs.o chesv.o chesvx.o chetd2.o \ + chetf2.o chetrd.o \ + chetrf.o chetri.o chetri2.o chetri2x.o cheswapr.o \ + chetrs.o chetrs2.o \ + chetf2_rook.o chetrf_rook.o chetri_rook.o \ + chetrs_rook.o checon_rook.o chesv_rook.o \ + chetf2_rk.o chetrf_rk.o chetri_3.o chetri_3x.o \ + chetrs_3.o checon_3.o chesv_rk.o \ + chesv_aa.o chetrf_aa.o chetrs_aa.o \ + chesv_aa_2stage.o chetrf_aa_2stage.o chetrs_aa_2stage.o \ + chgeqz.o chpcon.o chpev.o chpevd.o \ + chpevx.o chpgst.o chpgv.o chpgvd.o chpgvx.o chprfs.o chpsv.o \ + chpsvx.o \ + chptrd.o chptrf.o chptri.o chptrs.o chsein.o chseqr.o clabrd.o \ + clacgv.o clacon.o clacn2.o clacp2.o clacpy.o clacrm.o clacrt.o cladiv.o \ + claed0.o claed7.o claed8.o \ + claein.o claesy.o claev2.o clags2.o clagtm.o \ + clahef.o clahef_rook.o clahef_rk.o clahef_aa.o clahqr.o \ + clahr2.o claic1.o clals0.o clalsa.o clalsd.o clangb.o clange.o clangt.o \ + clanhb.o clanhe.o \ + clanhp.o clanhs.o clanht.o clansb.o clansp.o clansy.o clantb.o \ + clantp.o clantr.o clapll.o clapmt.o clarcm.o claqgb.o claqge.o \ + claqhb.o claqhe.o claqhp.o claqp2.o claqps.o claqsb.o \ + claqr0.o claqr1.o claqr2.o claqr3.o claqr4.o claqr5.o \ + claqsp.o claqsy.o clar1v.o clar2v.o ilaclr.o ilaclc.o \ + clarf.o clarfb.o clarfg.o clarfgp.o clarft.o \ + clarfx.o clarfy.o clargv.o clarnv.o clarrv.o clartg.o clartv.o \ + clarz.o clarzb.o clarzt.o clascl.o claset.o clasr.o classq.o \ + claswp.o clasyf.o clasyf_rook.o clasyf_rk.o clasyf_aa.o \ + clatbs.o clatdf.o clatps.o clatrd.o clatrs.o clatrz.o \ + clauu2.o clauum.o cpbcon.o cpbequ.o cpbrfs.o cpbstf.o cpbsv.o \ + cpbsvx.o cpbtf2.o cpbtrf.o cpbtrs.o cpocon.o cpoequ.o cporfs.o \ + cposv.o cposvx.o cpotf2.o cpotrf2.o cpotri.o cpstrf.o cpstf2.o \ + cppcon.o cppequ.o cpprfs.o cppsv.o cppsvx.o cpptrf.o cpptri.o cpptrs.o \ + cptcon.o cpteqr.o cptrfs.o cptsv.o cptsvx.o cpttrf.o cpttrs.o cptts2.o \ + crot.o cspcon.o cspmv.o cspr.o csprfs.o cspsv.o \ + 
cspsvx.o csptrf.o csptri.o csptrs.o csrscl.o cstedc.o \ + cstegr.o cstein.o csteqr.o csycon.o csymv.o \ + csyr.o csyrfs.o csysv.o csysvx.o csytf2.o csytrf.o csytri.o \ + csytri2.o csytri2x.o csyswapr.o \ + csytrs.o csytrs2.o \ + csyconv.o csyconvf.o csyconvf_rook.o \ + csytf2_rook.o csytrf_rook.o csytrs_rook.o \ + csytri_rook.o csycon_rook.o csysv_rook.o \ + csytf2_rk.o csytrf_rk.o csytrf_aa.o csytrs_3.o csytrs_aa.o \ + csytri_3.o csytri_3x.o csycon_3.o csysv_rk.o csysv_aa.o \ + csysv_aa_2stage.o csytrf_aa_2stage.o csytrs_aa_2stage.o \ + ctbcon.o ctbrfs.o ctbtrs.o ctgevc.o ctgex2.o \ + ctgexc.o ctgsen.o ctgsja.o ctgsna.o ctgsy2.o ctgsyl.o ctpcon.o \ + ctprfs.o ctptri.o \ + ctptrs.o ctrcon.o ctrevc.o ctrevc3.o ctrexc.o ctrrfs.o ctrsen.o ctrsna.o \ + ctrsyl.o ctrti2.o ctrtri.o ctrtrs.o ctzrzf.o cung2l.o cung2r.o \ + cungbr.o cunghr.o cungl2.o cunglq.o cungql.o cungqr.o cungr2.o \ + cungrq.o cungtr.o cungtsqr.o cunm2l.o cunm2r.o cunmbr.o cunmhr.o cunml2.o cunm22.o \ + cunmlq.o cunmql.o cunmqr.o cunmr2.o cunmr3.o cunmrq.o cunmrz.o \ + cunmtr.o cupgtr.o cupmtr.o icmax1.o scsum1.o cstemr.o \ + chfrk.o ctfttp.o clanhf.o cpftrf.o cpftri.o cpftrs.o ctfsm.o ctftri.o \ + ctfttr.o ctpttf.o ctpttr.o ctrttf.o ctrttp.o \ + cgeequb.o cgbequb.o csyequb.o cpoequb.o cheequb.o \ + cbbcsd.o clapmr.o cunbdb.o cunbdb1.o cunbdb2.o cunbdb3.o cunbdb4.o \ + cunbdb5.o cunbdb6.o cuncsd.o cuncsd2by1.o \ + cgeqrt.o cgeqrt2.o cgeqrt3.o cgemqrt.o \ + ctpqrt.o ctpqrt2.o ctpmqrt.o ctprfb.o \ + cgelqt.o cgelqt3.o cgemlqt.o \ + cgetsls.o cgeqr.o clatsqr.o clamtsqr.o cgemqr.o \ + cgelq.o claswlq.o clamswlq.o cgemlq.o \ + ctplqt.o ctplqt2.o ctpmlqt.o \ + cunhr_col.o claunhr_col_getrfnp.o claunhr_col_getrfnp2.o \ + chetrd_2stage.o chetrd_he2hb.o chetrd_hb2st.o chb2st_kernels.o \ + cheevd_2stage.o cheev_2stage.o cheevx_2stage.o cheevr_2stage.o \ + chbev_2stage.o chbevx_2stage.o chbevd_2stage.o chegv_2stage.o \ + cgesvdq.o \ + DEPRECATED/cgegs.o DEPRECATED/cgegv.o \ + DEPRECATED/cgeqpf.o 
DEPRECATED/cgelsx.o DEPRECATED/cggsvd.o \ + DEPRECATED/cggsvp.o DEPRECATED/clahrd.o DEPRECATED/clatzm.o DEPRECATED/ctzrqf.o + +DLASRC = dbdsvdx.o dgbbrd.o dgbcon.o dgbequ.o dgbrfs.o dgbsv.o \ + dgbsvx.o dgbtf2.o dgbtrf.o dgbtrs.o dgebak.o dgebal.o dgebd2.o \ + dgebrd.o dgecon.o dgeequ.o dgees.o dgeesx.o dgeev.o dgeevx.o \ + dgehd2.o dgehrd.o dgelq2.o dgelqf.o \ + dgels.o dgelsd.o dgelss.o dgelsy.o dgeql2.o dgeqlf.o \ + dgeqp3.o dgeqr2.o dgeqr2p.o dgeqrf.o dgeqrfp.o dgerfs.o dgerq2.o dgerqf.o \ + dgesc2.o dgesdd.o dgesv.o dgesvd.o dgesvdx.o dgesvx.o dgetc2.o dgetf2.o \ + dgetrf.o dgetrf2.o dgetri.o \ + dgetrs.o dggbak.o dggbal.o \ + dgges.o dgges3.o dggesx.o dggev.o dggev3.o dggevx.o \ + dggglm.o dgghrd.o dgghd3.o dgglse.o dggqrf.o \ + dggrqf.o dggsvd3.o dggsvp3.o dgtcon.o dgtrfs.o dgtsv.o \ + dgtsvx.o dgttrf.o dgttrs.o dgtts2.o dhgeqz.o \ + dhsein.o dhseqr.o dlabrd.o dlacon.o dlacn2.o \ + dlaein.o dlaexc.o dlag2.o dlags2.o dlagtm.o dlagv2.o dlahqr.o \ + dlahr2.o dlaic1.o dlaln2.o dlals0.o dlalsa.o dlalsd.o \ + dlangb.o dlange.o dlangt.o dlanhs.o dlansb.o dlansp.o \ + dlansy.o dlantb.o dlantp.o dlantr.o dlanv2.o \ + dlapll.o dlapmt.o \ + dlaqgb.o dlaqge.o dlaqp2.o dlaqps.o dlaqsb.o dlaqsp.o dlaqsy.o \ + dlaqr0.o dlaqr1.o dlaqr2.o dlaqr3.o dlaqr4.o dlaqr5.o \ + dlaqtr.o dlar1v.o dlar2v.o iladlr.o iladlc.o \ + dlarf.o dlarfb.o dlarfg.o dlarfgp.o dlarft.o dlarfx.o dlarfy.o \ + dlargv.o dlarrv.o dlartv.o \ + dlarz.o dlarzb.o dlarzt.o dlaswp.o dlasy2.o \ + dlasyf.o dlasyf_rook.o dlasyf_rk.o dlasyf_aa.o \ + dlatbs.o dlatdf.o dlatps.o dlatrd.o dlatrs.o dlatrz.o dlauu2.o \ + dlauum.o dopgtr.o dopmtr.o dorg2l.o dorg2r.o \ + dorgbr.o dorghr.o dorgl2.o dorglq.o dorgql.o dorgqr.o dorgr2.o \ + dorgrq.o dorgtr.o dorgtsqr.o dorm2l.o dorm2r.o dorm22.o \ + dormbr.o dormhr.o dorml2.o dormlq.o dormql.o dormqr.o dormr2.o \ + dormr3.o dormrq.o dormrz.o dormtr.o dpbcon.o dpbequ.o dpbrfs.o \ + dpbstf.o dpbsv.o dpbsvx.o \ + dpbtf2.o dpbtrf.o dpbtrs.o dpocon.o dpoequ.o dporfs.o dposv.o \ + 
dposvx.o dpotf2.o dpotrf.o dpotrf2.o dpotri.o dpotrs.o dpstrf.o dpstf2.o \ + dppcon.o dppequ.o \ + dpprfs.o dppsv.o dppsvx.o dpptrf.o dpptri.o dpptrs.o dptcon.o \ + dpteqr.o dptrfs.o dptsv.o dptsvx.o dpttrs.o dptts2.o drscl.o \ + dsbev.o dsbevd.o dsbevx.o dsbgst.o dsbgv.o dsbgvd.o dsbgvx.o \ + dsbtrd.o dspcon.o dspev.o dspevd.o dspevx.o dspgst.o \ + dspgv.o dspgvd.o dspgvx.o dsprfs.o dspsv.o dspsvx.o dsptrd.o \ + dsptrf.o dsptri.o dsptrs.o dstegr.o dstein.o dstev.o dstevd.o dstevr.o \ + dstevx.o dsycon.o dsyev.o dsyevd.o dsyevr.o \ + dsyevx.o dsygs2.o dsygst.o dsygv.o dsygvd.o dsygvx.o dsyrfs.o \ + dsysv.o dsysvx.o \ + dsytd2.o dsytf2.o dsytrd.o dsytrf.o dsytri.o dsytrs.o dsytrs2.o \ + dsytri2.o dsytri2x.o dsyswapr.o \ + dsyconv.o dsyconvf.o dsyconvf_rook.o \ + dsytf2_rook.o dsytrf_rook.o dsytrs_rook.o \ + dsytri_rook.o dsycon_rook.o dsysv_rook.o \ + dsytf2_rk.o dsytrf_rk.o dsytrs_3.o \ + dsytri_3.o dsytri_3x.o dsycon_3.o dsysv_rk.o \ + dsysv_aa.o dsytrf_aa.o dsytrs_aa.o \ + dsysv_aa_2stage.o dsytrf_aa_2stage.o dsytrs_aa_2stage.o \ + dtbcon.o \ + dtbrfs.o dtbtrs.o dtgevc.o dtgex2.o dtgexc.o dtgsen.o \ + dtgsja.o dtgsna.o dtgsy2.o dtgsyl.o dtpcon.o dtprfs.o dtptri.o \ + dtptrs.o \ + dtrcon.o dtrevc.o dtrevc3.o dtrexc.o dtrrfs.o dtrsen.o dtrsna.o dtrsyl.o \ + dtrti2.o dtrtri.o dtrtrs.o dtzrzf.o dstemr.o \ + dsgesv.o dsposv.o dlag2s.o slag2d.o dlat2s.o \ + dlansf.o dpftrf.o dpftri.o dpftrs.o dsfrk.o dtfsm.o dtftri.o dtfttp.o \ + dtfttr.o dtpttf.o dtpttr.o dtrttf.o dtrttp.o \ + dgejsv.o dgesvj.o dgsvj0.o dgsvj1.o \ + dgeequb.o dsyequb.o dpoequb.o dgbequb.o \ + dbbcsd.o dlapmr.o dorbdb.o dorbdb1.o dorbdb2.o dorbdb3.o dorbdb4.o \ + dorbdb5.o dorbdb6.o dorcsd.o dorcsd2by1.o \ + dgeqrt.o dgeqrt2.o dgeqrt3.o dgemqrt.o \ + dtpqrt.o dtpqrt2.o dtpmqrt.o dtprfb.o \ + dgelqt.o dgelqt3.o dgemlqt.o \ + dgetsls.o dgeqr.o dlatsqr.o dlamtsqr.o dgemqr.o \ + dgelq.o dlaswlq.o dlamswlq.o dgemlq.o \ + dtplqt.o dtplqt2.o dtpmlqt.o \ + dorhr_col.o dlaorhr_col_getrfnp.o 
dlaorhr_col_getrfnp2.o \ + dsytrd_2stage.o dsytrd_sy2sb.o dsytrd_sb2st.o dsb2st_kernels.o \ + dsyevd_2stage.o dsyev_2stage.o dsyevx_2stage.o dsyevr_2stage.o \ + dsbev_2stage.o dsbevx_2stage.o dsbevd_2stage.o dsygv_2stage.o \ + dgesvdq.o dcombssq.o \ + DEPRECATED/dgegs.o DEPRECATED/dgegv.o \ + DEPRECATED/dgeqpf.o DEPRECATED/dgelsx.o DEPRECATED/dggsvd.o \ + DEPRECATED/dggsvp.o DEPRECATED/dlahrd.o DEPRECATED/dlatzm.o DEPRECATED/dtzrqf.o + +ZLASRC = zbdsqr.o zgbbrd.o zgbcon.o zgbequ.o zgbrfs.o zgbsv.o zgbsvx.o \ + zgbtf2.o zgbtrf.o zgbtrs.o zgebak.o zgebal.o zgebd2.o zgebrd.o \ + zgecon.o zgeequ.o zgees.o zgeesx.o zgeev.o zgeevx.o \ + zgehd2.o zgehrd.o zgelq2.o zgelqf.o \ + zgels.o zgelsd.o zgelss.o zgelsy.o zgeql2.o zgeqlf.o zgeqp3.o \ + zgeqr2.o zgeqr2p.o zgeqrf.o zgeqrfp.o zgerfs.o zgerq2.o zgerqf.o \ + zgesc2.o zgesdd.o zgesv.o zgesvd.o zgesvdx.o zgesvx.o \ + zgesvj.o zgejsv.o zgsvj0.o zgsvj1.o \ + zgetc2.o zgetf2.o zgetrf.o zgetrf2.o \ + zgetri.o zgetrs.o \ + zggbak.o zggbal.o \ + zgges.o zgges3.o zggesx.o zggev.o zggev3.o zggevx.o \ + zggglm.o zgghrd.o zgghd3.o zgglse.o zggqrf.o zggrqf.o \ + zggsvd3.o zggsvp3.o \ + zgtcon.o zgtrfs.o zgtsv.o zgtsvx.o zgttrf.o zgttrs.o zgtts2.o zhbev.o \ + zhbevd.o zhbevx.o zhbgst.o zhbgv.o zhbgvd.o zhbgvx.o zhbtrd.o \ + zhecon.o zheev.o zheevd.o zheevr.o zheevx.o zhegs2.o zhegst.o \ + zhegv.o zhegvd.o zhegvx.o zherfs.o zhesv.o zhesvx.o zhetd2.o \ + zhetf2.o zhetrd.o \ + zhetrf.o zhetri.o zhetri2.o zhetri2x.o zheswapr.o \ + zhetrs.o zhetrs2.o \ + zhetf2_rook.o zhetrf_rook.o zhetri_rook.o \ + zhetrs_rook.o zhecon_rook.o zhesv_rook.o \ + zhetf2_rk.o zhetrf_rk.o zhetri_3.o zhetri_3x.o \ + zhetrs_3.o zhecon_3.o zhesv_rk.o \ + zhesv_aa.o zhetrf_aa.o zhetrs_aa.o \ + zhesv_aa_2stage.o zhetrf_aa_2stage.o zhetrs_aa_2stage.o \ + zhgeqz.o zhpcon.o zhpev.o zhpevd.o \ + zhpevx.o zhpgst.o zhpgv.o zhpgvd.o zhpgvx.o zhprfs.o zhpsv.o \ + zhpsvx.o \ + zhptrd.o zhptrf.o zhptri.o zhptrs.o zhsein.o zhseqr.o zlabrd.o \ + zlacgv.o zlacon.o zlacn2.o 
zlacp2.o zlacpy.o zlacrm.o zlacrt.o zladiv.o \ + zlaed0.o zlaed7.o zlaed8.o \ + zlaein.o zlaesy.o zlaev2.o zlags2.o zlagtm.o \ + zlahef.o zlahef_rook.o zlahef_rk.o zlahef_aa.o zlahqr.o \ + zlahr2.o zlaic1.o zlals0.o zlalsa.o zlalsd.o zlangb.o zlange.o \ + zlangt.o zlanhb.o \ + zlanhe.o \ + zlanhp.o zlanhs.o zlanht.o zlansb.o zlansp.o zlansy.o zlantb.o \ + zlantp.o zlantr.o zlapll.o zlapmt.o zlaqgb.o zlaqge.o \ + zlaqhb.o zlaqhe.o zlaqhp.o zlaqp2.o zlaqps.o zlaqsb.o \ + zlaqr0.o zlaqr1.o zlaqr2.o zlaqr3.o zlaqr4.o zlaqr5.o \ + zlaqsp.o zlaqsy.o zlar1v.o zlar2v.o ilazlr.o ilazlc.o \ + zlarcm.o zlarf.o zlarfb.o \ + zlarfg.o zlarfgp.o zlarft.o \ + zlarfx.o zlarfy.o zlargv.o zlarnv.o zlarrv.o zlartg.o zlartv.o \ + zlarz.o zlarzb.o zlarzt.o zlascl.o zlaset.o zlasr.o \ + zlassq.o zlaswp.o zlasyf.o zlasyf_rook.o zlasyf_rk.o zlasyf_aa.o \ + zlatbs.o zlatdf.o zlatps.o zlatrd.o zlatrs.o zlatrz.o zlauu2.o \ + zlauum.o zpbcon.o zpbequ.o zpbrfs.o zpbstf.o zpbsv.o \ + zpbsvx.o zpbtf2.o zpbtrf.o zpbtrs.o zpocon.o zpoequ.o zporfs.o \ + zposv.o zposvx.o zpotf2.o zpotrf.o zpotrf2.o zpotri.o zpotrs.o zpstrf.o zpstf2.o \ + zppcon.o zppequ.o zpprfs.o zppsv.o zppsvx.o zpptrf.o zpptri.o zpptrs.o \ + zptcon.o zpteqr.o zptrfs.o zptsv.o zptsvx.o zpttrf.o zpttrs.o zptts2.o \ + zrot.o zspcon.o zspmv.o zspr.o zsprfs.o zspsv.o \ + zspsvx.o zsptrf.o zsptri.o zsptrs.o zdrscl.o zstedc.o \ + zstegr.o zstein.o zsteqr.o zsycon.o zsymv.o \ + zsyr.o zsyrfs.o zsysv.o zsysvx.o zsytf2.o zsytrf.o zsytri.o \ + zsytri2.o zsytri2x.o zsyswapr.o \ + zsytrs.o zsytrs2.o \ + zsyconv.o zsyconvf.o zsyconvf_rook.o \ + zsytf2_rook.o zsytrf_rook.o zsytrs_rook.o zsytrs_aa.o \ + zsytri_rook.o zsycon_rook.o zsysv_rook.o \ + zsytf2_rk.o zsytrf_rk.o zsytrf_aa.o zsytrs_3.o \ + zsysv_aa_2stage.o zsytrf_aa_2stage.o zsytrs_aa_2stage.o \ + zsytri_3.o zsytri_3x.o zsycon_3.o zsysv_rk.o zsysv_aa.o \ + ztbcon.o ztbrfs.o ztbtrs.o ztgevc.o ztgex2.o \ + ztgexc.o ztgsen.o ztgsja.o ztgsna.o ztgsy2.o ztgsyl.o ztpcon.o \ + ztprfs.o ztptri.o 
\ + ztptrs.o ztrcon.o ztrevc.o ztrevc3.o ztrexc.o ztrrfs.o ztrsen.o ztrsna.o \ + ztrsyl.o ztrti2.o ztrtri.o ztrtrs.o ztzrzf.o zung2l.o \ + zung2r.o zungbr.o zunghr.o zungl2.o zunglq.o zungql.o zungqr.o zungr2.o \ + zungrq.o zungtr.o zungtsqr.o zunm2l.o zunm2r.o zunmbr.o zunmhr.o zunml2.o zunm22.o \ + zunmlq.o zunmql.o zunmqr.o zunmr2.o zunmr3.o zunmrq.o zunmrz.o \ + zunmtr.o zupgtr.o \ + zupmtr.o izmax1.o dzsum1.o zstemr.o \ + zcgesv.o zcposv.o zlag2c.o clag2z.o zlat2c.o \ + zhfrk.o ztfttp.o zlanhf.o zpftrf.o zpftri.o zpftrs.o ztfsm.o ztftri.o \ + ztfttr.o ztpttf.o ztpttr.o ztrttf.o ztrttp.o \ + zgeequb.o zgbequb.o zsyequb.o zpoequb.o zheequb.o \ + zbbcsd.o zlapmr.o zunbdb.o zunbdb1.o zunbdb2.o zunbdb3.o zunbdb4.o \ + zunbdb5.o zunbdb6.o zuncsd.o zuncsd2by1.o \ + zgeqrt.o zgeqrt2.o zgeqrt3.o zgemqrt.o \ + ztpqrt.o ztpqrt2.o ztpmqrt.o ztprfb.o \ + ztplqt.o ztplqt2.o ztpmlqt.o \ + zgelqt.o zgelqt3.o zgemlqt.o \ + zgetsls.o zgeqr.o zlatsqr.o zlamtsqr.o zgemqr.o \ + zgelq.o zlaswlq.o zlamswlq.o zgemlq.o \ + zunhr_col.o zlaunhr_col_getrfnp.o zlaunhr_col_getrfnp2.o \ + zhetrd_2stage.o zhetrd_he2hb.o zhetrd_hb2st.o zhb2st_kernels.o \ + zheevd_2stage.o zheev_2stage.o zheevx_2stage.o zheevr_2stage.o \ + zhbev_2stage.o zhbevx_2stage.o zhbevd_2stage.o zhegv_2stage.o \ + zgesvdq.o \ + +SCLAUX = sbdsdc.o \ + sbdsqr.o sdisna.o slabad.o slacpy.o sladiv.o slae2.o slaebz.o \ + slaed0.o slaed1.o slaed2.o slaed3.o slaed4.o slaed5.o slaed6.o \ + slaed7.o slaed8.o slaed9.o slaeda.o slaev2.o slagtf.o \ + slagts.o slamrg.o slanst.o \ + slapy2.o slapy3.o slarnv.o \ + slarra.o slarrb.o slarrc.o slarrd.o slarre.o slarrf.o slarrj.o \ + slarrk.o slarrr.o slaneg.o \ + slartg.o slaruv.o slas2.o slascl.o \ + slasd0.o slasd1.o slasd2.o slasd3.o slasd4.o slasd5.o slasd6.o \ + slasd7.o slasd8.o slasda.o slasdq.o slasdt.o \ + slaset.o slasq1.o slasq2.o slasq3.o slasq4.o slasq5.o slasq6.o \ + slasr.o slasrt.o slassq.o slasv2.o spttrf.o sstebz.o sstedc.o \ + ssteqr.o ssterf.o slaisnan.o sisnan.o \ + 
slartgp.o slartgs.o \ + ../INSTALL/slamch.o ../INSTALL/second_INT_ETIME.o \ + DEPRECATED/zgegs.o DEPRECATED/zgegv.o \ + DEPRECATED/zgeqpf.o DEPRECATED/zgelsx.o DEPRECATED/zggsvd.o \ + DEPRECATED/zggsvp.o DEPRECATED/zlahrd.o DEPRECATED/zlatzm.o DEPRECATED/ztzrqf.o + +DZLAUX = dbdsdc.o \ + dbdsqr.o ddisna.o dlabad.o dlacpy.o dladiv.o dlae2.o dlaebz.o \ + dlaed0.o dlaed1.o dlaed2.o dlaed3.o dlaed4.o dlaed5.o dlaed6.o \ + dlaed7.o dlaed8.o dlaed9.o dlaeda.o dlaev2.o dlagtf.o \ + dlagts.o dlamrg.o dlanst.o \ + dlapy2.o dlapy3.o dlarnv.o \ + dlarra.o dlarrb.o dlarrc.o dlarrd.o dlarre.o dlarrf.o dlarrj.o \ + dlarrk.o dlarrr.o dlaneg.o \ + dlartg.o dlaruv.o dlas2.o dlascl.o \ + dlasd0.o dlasd1.o dlasd2.o dlasd3.o dlasd4.o dlasd5.o dlasd6.o \ + dlasd7.o dlasd8.o dlasda.o dlasdq.o dlasdt.o \ + dlaset.o dlasq1.o dlasq2.o dlasq3.o dlasq4.o dlasq5.o dlasq6.o \ + dlasr.o dlasrt.o dlassq.o dlasv2.o dpttrf.o dstebz.o dstedc.o \ + dsteqr.o dsterf.o dlaisnan.o disnan.o \ + dlartgp.o dlartgs.o \ + ../INSTALL/dlamch.o ../INSTALL/dsecnd_INT_ETIME.o + +ALLAUX = ilaenv.o ilaenv2stage.o ieeeck.o lsamen.o iparmq.o iparam2stage.o \ + ilaprec.o ilatrans.o ilauplo.o iladiag.o chla_transtype.o \ + ../INSTALL/ilaver.o ../INSTALL/lsame.o xerbla.o xerbla_array.o + +SXLASRC = sgesvxx.o sgerfsx.o sla_gerfsx_extended.o sla_geamv.o \ + sla_gercond.o sla_gerpvgrw.o ssysvxx.o ssyrfsx.o \ + sla_syrfsx_extended.o sla_syamv.o sla_syrcond.o sla_syrpvgrw.o \ + sposvxx.o sporfsx.o sla_porfsx_extended.o sla_porcond.o \ + sla_porpvgrw.o sgbsvxx.o sgbrfsx.o sla_gbrfsx_extended.o \ + sla_gbamv.o sla_gbrcond.o sla_gbrpvgrw.o sla_lin_berr.o slarscl2.o \ + slascl2.o sla_wwaddw.o + +DXLASRC = dgesvxx.o dgerfsx.o dla_gerfsx_extended.o dla_geamv.o \ + dla_gercond.o dla_gerpvgrw.o dsysvxx.o dsyrfsx.o \ + dla_syrfsx_extended.o dla_syamv.o dla_syrcond.o dla_syrpvgrw.o \ + dposvxx.o dporfsx.o dla_porfsx_extended.o dla_porcond.o \ + dla_porpvgrw.o dgbsvxx.o dgbrfsx.o dla_gbrfsx_extended.o \ + dla_gbamv.o dla_gbrcond.o 
dla_gbrpvgrw.o dla_lin_berr.o dlarscl2.o \ + dlascl2.o dla_wwaddw.o + +CXLASRC = cgesvxx.o cgerfsx.o cla_gerfsx_extended.o cla_geamv.o \ + cla_gercond_c.o cla_gercond_x.o cla_gerpvgrw.o \ + csysvxx.o csyrfsx.o cla_syrfsx_extended.o cla_syamv.o \ + cla_syrcond_c.o cla_syrcond_x.o cla_syrpvgrw.o \ + cposvxx.o cporfsx.o cla_porfsx_extended.o \ + cla_porcond_c.o cla_porcond_x.o cla_porpvgrw.o \ + cgbsvxx.o cgbrfsx.o cla_gbrfsx_extended.o cla_gbamv.o \ + cla_gbrcond_c.o cla_gbrcond_x.o cla_gbrpvgrw.o \ + chesvxx.o cherfsx.o cla_herfsx_extended.o cla_heamv.o \ + cla_hercond_c.o cla_hercond_x.o cla_herpvgrw.o \ + cla_lin_berr.o clarscl2.o clascl2.o cla_wwaddw.o + +ZCLASRC = cpotrs.o cgetrs.o cpotrf.o cgetrf.o + +ZXLASRC = zgesvxx.o zgerfsx.o zla_gerfsx_extended.o zla_geamv.o \ + zla_gercond_c.o zla_gercond_x.o zla_gerpvgrw.o zsysvxx.o zsyrfsx.o \ + zla_syrfsx_extended.o zla_syamv.o zla_syrcond_c.o zla_syrcond_x.o \ + zla_syrpvgrw.o zposvxx.o zporfsx.o zla_porfsx_extended.o \ + zla_porcond_c.o zla_porcond_x.o zla_porpvgrw.o zgbsvxx.o zgbrfsx.o \ + zla_gbrfsx_extended.o zla_gbamv.o zla_gbrcond_c.o zla_gbrcond_x.o \ + zla_gbrpvgrw.o zhesvxx.o zherfsx.o zla_herfsx_extended.o \ + zla_heamv.o zla_hercond_c.o zla_hercond_x.o zla_herpvgrw.o \ + zla_lin_berr.o zlarscl2.o zlascl2.o zla_wwaddw.o + +ALLXAUX = + +ALLOBJ=$(SLASRC) $(DLASRC) $(CLASRC) $(ZLASRC) $(SCLAUX) $(DZLAUX) $(ALLAUX) $(DSLASRC) $(ZCLASRC) + +ifdef USEXBLAS +ALLXOBJ=$(SXLASRC) $(DXLASRC) $(CXLASRC) $(ZXLASRC) $(ALLXAUX) +endif + +static: $(ALLOBJ) $(ALLXOBJ) +ifeq "$(SYMBOLSUFFIX)" "" + ar ruv liblapack.a $(ALLOBJ) $(ALLXOBJ) + ranlib liblapack.a +else + ar ruv liblapack$(SYMBOLSUFFIX).a $(ALLOBJ) $(ALLXOBJ) + ranlib liblapack$(SYMBOLSUFFIX).a + for i in `nm liblapack$(SYMBOLSUFFIX).a |grep " T " | awk '{print $$3}'`; do echo "$$i" "64_$$i"; done > lapack-static-prefix.def.dirty + sort -n lapack-static-prefix.def.dirty | uniq > lapack-static-prefix.def + $(OBJCOPY) --redefine-syms lapack-static-prefix.def 
liblapack$(SYMBOLSUFFIX).a liblapack$(SYMBOLSUFFIX).a.fixed + rm -rf liblapack$(SYMBOLSUFFIX).a + mv liblapack$(SYMBOLSUFFIX).a.fixed liblapack$(SYMBOLSUFFIX).a +endif + +shared: $(ALLOBJ) $(ALLXOBJ) +ifeq "$(SYMBOLSUFFIX)" "" + # No renaming needed + cc $(CFLAGS) -shared -Wl,-soname,liblapack.so.@SHORTVER@ -o liblapack.so.@LONGVER@ $(ALLOBJ) $(ALLXOBJ) $(LDFLAGS) -L.. -lblas -lm -lgfortran -lc +else + cc $(CFLAGS) -shared -Wl,-soname,liblapack$(SYMBOLSUFFIX).so.@SHORTVER@ -o liblapack$(SYMBOLSUFFIX).so.@LONGVER@ $(ALLOBJ) $(ALLXOBJ) $(LDFLAGS) -L.. -lblas -lm -lgfortran -lc + # generate a list of all symbols in shared library and rename with SYMBOLSUFFIX + for i in `readelf -Ws liblapack$(SYMBOLSUFFIX).so.@LONGVER@ | awk '{print $$8}' | grep -v GLIBC |grep -v GFORTRAN |grep -v "Name" `; do echo "$$i" "64_$$i"; done > lapack-prefix.def.dirty + sort -n lapack-prefix.def.dirty | uniq > lapack-prefix.def + $(OBJCOPY) --redefine-syms lapack-prefix.def liblapack$(SYMBOLSUFFIX).so.@LONGVER@ liblapack$(SYMBOLSUFFIX).so.@LONGVER@.fixed + rm -rf liblapack$(SYMBOLSUFFIX).so.@LONGVER@ + mv liblapack$(SYMBOLSUFFIX).so.@LONGVER@.fixed liblapack$(SYMBOLSUFFIX).so.@LONGVER@ +endif diff --git a/blas-bz143340.patch b/blas-bz143340.patch new file mode 100644 index 0000000..7581a5d --- /dev/null +++ b/blas-bz143340.patch @@ -0,0 +1,801 @@ +--- LAPACK/BLAS/SRC/drotmg.f.BAD 2005-09-28 17:59:56.000000000 -0500 ++++ LAPACK/BLAS/SRC/drotmg.f 2005-09-28 18:01:04.000000000 -0500 +@@ -0,0 +1,169 @@ ++ SUBROUTINE DROTMG (DD1,DD2,DX1,DY1,DPARAM) ++C ++C CONSTRUCT THE MODIFIED GIVENS TRANSFORMATION MATRIX H WHICH ZEROS ++C THE SECOND COMPONENT OF THE 2-VECTOR (DSQRT(DD1)*DX1,DSQRT(DD2)* ++C DY2)**T. ++C WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS.. ++C ++C DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0 ++C ++C (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0) ++C H=( ) ( ) ( ) ( ) ++C (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0). 
++C LOCATIONS 2-4 OF DPARAM CONTAIN DH11, DH21, DH12, AND DH22 ++C RESPECTIVELY. (VALUES OF 1.D0, -1.D0, OR 0.D0 IMPLIED BY THE ++C VALUE OF DPARAM(1) ARE NOT STORED IN DPARAM.) ++C ++C THE VALUES OF GAMSQ AND RGAMSQ SET IN THE DATA STATEMENT MAY BE ++C INEXACT. THIS IS OK AS THEY ARE ONLY USED FOR TESTING THE SIZE ++C OF DD1 AND DD2. ALL ACTUAL SCALING OF DATA IS DONE USING GAM. ++C ++ DOUBLE PRECISION GAM,ONE,RGAMSQ,DD2,DH11,DH21,DPARAM,DP2, ++ 1 DQ2,DU,DY1,ZERO,GAMSQ,DD1,DFLAG,DH12,DH22,DP1,DQ1, ++ 2 DTEMP,DX1,TWO ++ DIMENSION DPARAM(5) ++C ++ DATA ZERO,ONE,TWO /0.D0,1.D0,2.D0/ ++ DATA GAM,GAMSQ,RGAMSQ/4096.D0,16777216.D0,5.9604645D-8/ ++ IF(.NOT. DD1 .LT. ZERO) GO TO 10 ++C GO ZERO-H-D-AND-DX1.. ++ GO TO 60 ++ 10 CONTINUE ++C CASE-DD1-NONNEGATIVE ++ DP2=DD2*DY1 ++ IF(.NOT. DP2 .EQ. ZERO) GO TO 20 ++ DFLAG=-TWO ++ GO TO 260 ++C REGULAR-CASE.. ++ 20 CONTINUE ++ DP1=DD1*DX1 ++ DQ2=DP2*DY1 ++ DQ1=DP1*DX1 ++C ++ IF(.NOT. DABS(DQ1) .GT. DABS(DQ2)) GO TO 40 ++ DH21=-DY1/DX1 ++ DH12=DP2/DP1 ++C ++ DU=ONE-DH12*DH21 ++C ++ IF(.NOT. DU .LE. ZERO) GO TO 30 ++C GO ZERO-H-D-AND-DX1.. ++ GO TO 60 ++ 30 CONTINUE ++ DFLAG=ZERO ++ DD1=DD1/DU ++ DD2=DD2/DU ++ DX1=DX1*DU ++C GO SCALE-CHECK.. ++ GO TO 100 ++ 40 CONTINUE ++ IF(.NOT. DQ2 .LT. ZERO) GO TO 50 ++C GO ZERO-H-D-AND-DX1.. ++ GO TO 60 ++ 50 CONTINUE ++ DFLAG=ONE ++ DH11=DP1/DP2 ++ DH22=DX1/DY1 ++ DU=ONE+DH11*DH22 ++ DTEMP=DD2/DU ++ DD2=DD1/DU ++ DD1=DTEMP ++ DX1=DY1*DU ++C GO SCALE-CHECK ++ GO TO 100 ++C PROCEDURE..ZERO-H-D-AND-DX1.. ++ 60 CONTINUE ++ DFLAG=-ONE ++ DH11=ZERO ++ DH12=ZERO ++ DH21=ZERO ++ DH22=ZERO ++C ++ DD1=ZERO ++ DD2=ZERO ++ DX1=ZERO ++C RETURN.. ++ GO TO 220 ++C PROCEDURE..FIX-H.. ++ 70 CONTINUE ++ IF(.NOT. DFLAG .GE. ZERO) GO TO 90 ++C ++ IF(.NOT. DFLAG .EQ. ZERO) GO TO 80 ++ DH11=ONE ++ DH22=ONE ++ DFLAG=-ONE ++ GO TO 90 ++ 80 CONTINUE ++ DH21=-ONE ++ DH12=ONE ++ DFLAG=-ONE ++ 90 CONTINUE ++ GO TO IGO,(120,150,180,210) ++C PROCEDURE..SCALE-CHECK ++ 100 CONTINUE ++ 110 CONTINUE ++ IF(.NOT. DD1 .LE. 
RGAMSQ) GO TO 130 ++ IF(DD1 .EQ. ZERO) GO TO 160 ++ ASSIGN 120 TO IGO ++C FIX-H.. ++ GO TO 70 ++ 120 CONTINUE ++ DD1=DD1*GAM**2 ++ DX1=DX1/GAM ++ DH11=DH11/GAM ++ DH12=DH12/GAM ++ GO TO 110 ++ 130 CONTINUE ++ 140 CONTINUE ++ IF(.NOT. DD1 .GE. GAMSQ) GO TO 160 ++ ASSIGN 150 TO IGO ++C FIX-H.. ++ GO TO 70 ++ 150 CONTINUE ++ DD1=DD1/GAM**2 ++ DX1=DX1*GAM ++ DH11=DH11*GAM ++ DH12=DH12*GAM ++ GO TO 140 ++ 160 CONTINUE ++ 170 CONTINUE ++ IF(.NOT. DABS(DD2) .LE. RGAMSQ) GO TO 190 ++ IF(DD2 .EQ. ZERO) GO TO 220 ++ ASSIGN 180 TO IGO ++C FIX-H.. ++ GO TO 70 ++ 180 CONTINUE ++ DD2=DD2*GAM**2 ++ DH21=DH21/GAM ++ DH22=DH22/GAM ++ GO TO 170 ++ 190 CONTINUE ++ 200 CONTINUE ++ IF(.NOT. DABS(DD2) .GE. GAMSQ) GO TO 220 ++ ASSIGN 210 TO IGO ++C FIX-H.. ++ GO TO 70 ++ 210 CONTINUE ++ DD2=DD2/GAM**2 ++ DH21=DH21*GAM ++ DH22=DH22*GAM ++ GO TO 200 ++ 220 CONTINUE ++ IF(DFLAG)250,230,240 ++ 230 CONTINUE ++ DPARAM(3)=DH21 ++ DPARAM(4)=DH12 ++ GO TO 260 ++ 240 CONTINUE ++ DPARAM(2)=DH11 ++ DPARAM(5)=DH22 ++ GO TO 260 ++ 250 CONTINUE ++ DPARAM(2)=DH11 ++ DPARAM(3)=DH21 ++ DPARAM(4)=DH12 ++ DPARAM(5)=DH22 ++ 260 CONTINUE ++ DPARAM(1)=DFLAG ++ RETURN ++ END +--- LAPACK/BLAS/SRC/sdsdot.f.BAD 2005-09-28 18:00:11.000000000 -0500 ++++ LAPACK/BLAS/SRC/sdsdot.f 2005-09-28 18:01:23.000000000 -0500 +@@ -0,0 +1,78 @@ ++*DECK SDSDOT ++ REAL FUNCTION SDSDOT (N, SB, SX, INCX, SY, INCY) ++C***BEGIN PROLOGUE SDSDOT ++C***PURPOSE Compute the inner product of two vectors with extended ++C precision accumulation. ++C***LIBRARY SLATEC (BLAS) ++C***CATEGORY D1A4 ++C***TYPE SINGLE PRECISION (SDSDOT-S, CDCDOT-C) ++C***KEYWORDS BLAS, DOT PRODUCT, INNER PRODUCT, LINEAR ALGEBRA, VECTOR ++C***AUTHOR Lawson, C. L., (JPL) ++C Hanson, R. J., (SNLA) ++C Kincaid, D. R., (U. of Texas) ++C Krogh, F. 
T., (JPL) ++C***DESCRIPTION ++C ++C B L A S Subprogram ++C Description of Parameters ++C ++C --Input-- ++C N number of elements in input vector(s) ++C SB single precision scalar to be added to inner product ++C SX single precision vector with N elements ++C INCX storage spacing between elements of SX ++C SY single precision vector with N elements ++C INCY storage spacing between elements of SY ++C ++C --Output-- ++C SDSDOT single precision dot product (SB if N .LE. 0) ++C ++C Returns S.P. result with dot product accumulated in D.P. ++C SDSDOT = SB + sum for I = 0 to N-1 of SX(LX+I*INCX)*SY(LY+I*INCY), ++C where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is ++C defined in a similar way using INCY. ++C ++C***REFERENCES C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. ++C Krogh, Basic linear algebra subprograms for Fortran ++C usage, Algorithm No. 539, Transactions on Mathematical ++C Software 5, 3 (September 1979), pp. 308-323. ++C***ROUTINES CALLED (NONE) ++C***REVISION HISTORY (YYMMDD) ++C 791001 DATE WRITTEN ++C 890531 Changed all specific intrinsics to generic. (WRB) ++C 890831 Modified array declarations. (WRB) ++C 890831 REVISION DATE from Version 3.2 ++C 891214 Prologue converted to Version 4.0 format. (BAB) ++C 920310 Corrected definition of LX in DESCRIPTION. (WRB) ++C 920501 Reformatted the REFERENCES section. (WRB) ++C***END PROLOGUE SDSDOT ++ REAL SX(*), SY(*), SB ++ DOUBLE PRECISION DSDOT ++C***FIRST EXECUTABLE STATEMENT SDSDOT ++ DSDOT = SB ++ IF (N .LE. 0) GO TO 30 ++ IF (INCX.EQ.INCY .AND. INCX.GT.0) GO TO 40 ++C ++C Code for unequal or nonpositive increments. ++C ++ KX = 1 ++ KY = 1 ++ IF (INCX .LT. 0) KX = 1+(1-N)*INCX ++ IF (INCY .LT. 0) KY = 1+(1-N)*INCY ++ DO 10 I = 1,N ++ DSDOT = DSDOT + DBLE(SX(KX))*DBLE(SY(KY)) ++ KX = KX + INCX ++ KY = KY + INCY ++ 10 CONTINUE ++ 30 SDSDOT = DSDOT ++ RETURN ++C ++C Code for equal and positive increments. 
++C ++ 40 NS = N*INCX ++ DO 50 I = 1,NS,INCX ++ DSDOT = DSDOT + DBLE(SX(I))*DBLE(SY(I)) ++ 50 CONTINUE ++ SDSDOT = DSDOT ++ RETURN ++ END +--- LAPACK/BLAS/SRC/csrot.f.BAD 2005-09-28 17:59:45.000000000 -0500 ++++ LAPACK/BLAS/SRC/csrot.f 2005-09-28 18:00:41.000000000 -0500 +@@ -0,0 +1,38 @@ ++ subroutine csrot (n,cx,incx,cy,incy,c,s) ++c ++c applies a plane rotation, where the cos and sin (c and s) are real ++c and the vectors cx and cy are complex. ++c jack dongarra, linpack, 3/11/78. ++c ++ complex cx(1),cy(1),ctemp ++ real c,s ++ integer i,incx,incy,ix,iy,n ++c ++ if(n.le.0)return ++ if(incx.eq.1.and.incy.eq.1)go to 20 ++c ++c code for unequal increments or equal increments not equal ++c to 1 ++c ++ ix = 1 ++ iy = 1 ++ if(incx.lt.0)ix = (-n+1)*incx + 1 ++ if(incy.lt.0)iy = (-n+1)*incy + 1 ++ do 10 i = 1,n ++ ctemp = c*cx(ix) + s*cy(iy) ++ cy(iy) = c*cy(iy) - s*cx(ix) ++ cx(ix) = ctemp ++ ix = ix + incx ++ iy = iy + incy ++ 10 continue ++ return ++c ++c code for both increments equal to 1 ++c ++ 20 do 30 i = 1,n ++ ctemp = c*cx(i) + s*cy(i) ++ cy(i) = c*cy(i) - s*cx(i) ++ cx(i) = ctemp ++ 30 continue ++ return ++ end +--- LAPACK/BLAS/SRC/srotmg.f.BAD 2005-09-28 18:00:24.000000000 -0500 ++++ LAPACK/BLAS/SRC/srotmg.f 2005-09-28 18:01:45.000000000 -0500 +@@ -0,0 +1,166 @@ ++ SUBROUTINE SROTMG (SD1,SD2,SX1,SY1,SPARAM) ++C ++C CONSTRUCT THE MODIFIED GIVENS TRANSFORMATION MATRIX H WHICH ZEROS ++C THE SECOND COMPONENT OF THE 2-VECTOR (SQRT(SD1)*SX1,SQRT(SD2)* ++C SY2)**T. ++C WITH SPARAM(1)=SFLAG, H HAS ONE OF THE FOLLOWING FORMS.. ++C ++C SFLAG=-1.E0 SFLAG=0.E0 SFLAG=1.E0 SFLAG=-2.E0 ++C ++C (SH11 SH12) (1.E0 SH12) (SH11 1.E0) (1.E0 0.E0) ++C H=( ) ( ) ( ) ( ) ++C (SH21 SH22), (SH21 1.E0), (-1.E0 SH22), (0.E0 1.E0). ++C LOCATIONS 2-4 OF SPARAM CONTAIN SH11,SH21,SH12, AND SH22 ++C RESPECTIVELY. (VALUES OF 1.E0, -1.E0, OR 0.E0 IMPLIED BY THE ++C VALUE OF SPARAM(1) ARE NOT STORED IN SPARAM.) 
++C ++C THE VALUES OF GAMSQ AND RGAMSQ SET IN THE DATA STATEMENT MAY BE ++C INEXACT. THIS IS OK AS THEY ARE ONLY USED FOR TESTING THE SIZE ++C OF SD1 AND SD2. ALL ACTUAL SCALING OF DATA IS DONE USING GAM. ++C ++ DIMENSION SPARAM(5) ++C ++ DATA ZERO,ONE,TWO /0.E0,1.E0,2.E0/ ++ DATA GAM,GAMSQ,RGAMSQ/4096.E0,1.67772E7,5.96046E-8/ ++ IF(.NOT. SD1 .LT. ZERO) GO TO 10 ++C GO ZERO-H-D-AND-SX1.. ++ GO TO 60 ++ 10 CONTINUE ++C CASE-SD1-NONNEGATIVE ++ SP2=SD2*SY1 ++ IF(.NOT. SP2 .EQ. ZERO) GO TO 20 ++ SFLAG=-TWO ++ GO TO 260 ++C REGULAR-CASE.. ++ 20 CONTINUE ++ SP1=SD1*SX1 ++ SQ2=SP2*SY1 ++ SQ1=SP1*SX1 ++C ++ IF(.NOT. ABS(SQ1) .GT. ABS(SQ2)) GO TO 40 ++ SH21=-SY1/SX1 ++ SH12=SP2/SP1 ++C ++ SU=ONE-SH12*SH21 ++C ++ IF(.NOT. SU .LE. ZERO) GO TO 30 ++C GO ZERO-H-D-AND-SX1.. ++ GO TO 60 ++ 30 CONTINUE ++ SFLAG=ZERO ++ SD1=SD1/SU ++ SD2=SD2/SU ++ SX1=SX1*SU ++C GO SCALE-CHECK.. ++ GO TO 100 ++ 40 CONTINUE ++ IF(.NOT. SQ2 .LT. ZERO) GO TO 50 ++C GO ZERO-H-D-AND-SX1.. ++ GO TO 60 ++ 50 CONTINUE ++ SFLAG=ONE ++ SH11=SP1/SP2 ++ SH22=SX1/SY1 ++ SU=ONE+SH11*SH22 ++ STEMP=SD2/SU ++ SD2=SD1/SU ++ SD1=STEMP ++ SX1=SY1*SU ++C GO SCALE-CHECK ++ GO TO 100 ++C PROCEDURE..ZERO-H-D-AND-SX1.. ++ 60 CONTINUE ++ SFLAG=-ONE ++ SH11=ZERO ++ SH12=ZERO ++ SH21=ZERO ++ SH22=ZERO ++C ++ SD1=ZERO ++ SD2=ZERO ++ SX1=ZERO ++C RETURN.. ++ GO TO 220 ++C PROCEDURE..FIX-H.. ++ 70 CONTINUE ++ IF(.NOT. SFLAG .GE. ZERO) GO TO 90 ++C ++ IF(.NOT. SFLAG .EQ. ZERO) GO TO 80 ++ SH11=ONE ++ SH22=ONE ++ SFLAG=-ONE ++ GO TO 90 ++ 80 CONTINUE ++ SH21=-ONE ++ SH12=ONE ++ SFLAG=-ONE ++ 90 CONTINUE ++ GO TO IGO,(120,150,180,210) ++C PROCEDURE..SCALE-CHECK ++ 100 CONTINUE ++ 110 CONTINUE ++ IF(.NOT. SD1 .LE. RGAMSQ) GO TO 130 ++ IF(SD1 .EQ. ZERO) GO TO 160 ++ ASSIGN 120 TO IGO ++C FIX-H.. ++ GO TO 70 ++ 120 CONTINUE ++ SD1=SD1*GAM**2 ++ SX1=SX1/GAM ++ SH11=SH11/GAM ++ SH12=SH12/GAM ++ GO TO 110 ++ 130 CONTINUE ++ 140 CONTINUE ++ IF(.NOT. SD1 .GE. GAMSQ) GO TO 160 ++ ASSIGN 150 TO IGO ++C FIX-H.. 
++ GO TO 70 ++ 150 CONTINUE ++ SD1=SD1/GAM**2 ++ SX1=SX1*GAM ++ SH11=SH11*GAM ++ SH12=SH12*GAM ++ GO TO 140 ++ 160 CONTINUE ++ 170 CONTINUE ++ IF(.NOT. ABS(SD2) .LE. RGAMSQ) GO TO 190 ++ IF(SD2 .EQ. ZERO) GO TO 220 ++ ASSIGN 180 TO IGO ++C FIX-H.. ++ GO TO 70 ++ 180 CONTINUE ++ SD2=SD2*GAM**2 ++ SH21=SH21/GAM ++ SH22=SH22/GAM ++ GO TO 170 ++ 190 CONTINUE ++ 200 CONTINUE ++ IF(.NOT. ABS(SD2) .GE. GAMSQ) GO TO 220 ++ ASSIGN 210 TO IGO ++C FIX-H.. ++ GO TO 70 ++ 210 CONTINUE ++ SD2=SD2/GAM**2 ++ SH21=SH21*GAM ++ SH22=SH22*GAM ++ GO TO 200 ++ 220 CONTINUE ++ IF(SFLAG)250,230,240 ++ 230 CONTINUE ++ SPARAM(3)=SH21 ++ SPARAM(4)=SH12 ++ GO TO 260 ++ 240 CONTINUE ++ SPARAM(2)=SH11 ++ SPARAM(5)=SH22 ++ GO TO 260 ++ 250 CONTINUE ++ SPARAM(2)=SH11 ++ SPARAM(3)=SH21 ++ SPARAM(4)=SH12 ++ SPARAM(5)=SH22 ++ 260 CONTINUE ++ SPARAM(1)=SFLAG ++ RETURN ++ END +--- LAPACK/BLAS/SRC/dsdot.f.BAD 2005-09-28 18:00:03.000000000 -0500 ++++ LAPACK/BLAS/SRC/dsdot.f 2005-09-28 18:01:11.000000000 -0500 +@@ -0,0 +1,74 @@ ++*DECK DSDOT ++ DOUBLE PRECISION FUNCTION DSDOT (N, SX, INCX, SY, INCY) ++C***BEGIN PROLOGUE DSDOT ++C***PURPOSE Compute the inner product of two vectors with extended ++C precision accumulation and result. ++C***LIBRARY SLATEC (BLAS) ++C***CATEGORY D1A4 ++C***TYPE DOUBLE PRECISION (DSDOT-D, DCDOT-C) ++C***KEYWORDS BLAS, COMPLEX VECTORS, DOT PRODUCT, INNER PRODUCT, ++C LINEAR ALGEBRA, VECTOR ++C***AUTHOR Lawson, C. L., (JPL) ++C Hanson, R. J., (SNLA) ++C Kincaid, D. R., (U. of Texas) ++C Krogh, F. T., (JPL) ++C***DESCRIPTION ++C ++C B L A S Subprogram ++C Description of Parameters ++C ++C --Input-- ++C N number of elements in input vector(s) ++C SX single precision vector with N elements ++C INCX storage spacing between elements of SX ++C SY single precision vector with N elements ++C INCY storage spacing between elements of SY ++C ++C --Output-- ++C DSDOT double precision dot product (zero if N.LE.0) ++C ++C Returns D.P. dot product accumulated in D.P., for S.P. 
SX and SY ++C DSDOT = sum for I = 0 to N-1 of SX(LX+I*INCX) * SY(LY+I*INCY), ++C where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is ++C defined in a similar way using INCY. ++C ++C***REFERENCES C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. ++C Krogh, Basic linear algebra subprograms for Fortran ++C usage, Algorithm No. 539, Transactions on Mathematical ++C Software 5, 3 (September 1979), pp. 308-323. ++C***ROUTINES CALLED (NONE) ++C***REVISION HISTORY (YYMMDD) ++C 791001 DATE WRITTEN ++C 890831 Modified array declarations. (WRB) ++C 890831 REVISION DATE from Version 3.2 ++C 891214 Prologue converted to Version 4.0 format. (BAB) ++C 920310 Corrected definition of LX in DESCRIPTION. (WRB) ++C 920501 Reformatted the REFERENCES section. (WRB) ++C***END PROLOGUE DSDOT ++ REAL SX(*),SY(*) ++C***FIRST EXECUTABLE STATEMENT DSDOT ++ DSDOT = 0.0D0 ++ IF (N .LE. 0) RETURN ++ IF (INCX.EQ.INCY .AND. INCX.GT.0) GO TO 20 ++C ++C Code for unequal or nonpositive increments. ++C ++ KX = 1 ++ KY = 1 ++ IF (INCX .LT. 0) KX = 1+(1-N)*INCX ++ IF (INCY .LT. 0) KY = 1+(1-N)*INCY ++ DO 10 I = 1,N ++ DSDOT = DSDOT + DBLE(SX(KX))*DBLE(SY(KY)) ++ KX = KX + INCX ++ KY = KY + INCY ++ 10 CONTINUE ++ RETURN ++C ++C Code for equal, positive, non-unit increments. ++C ++ 20 NS = N*INCX ++ DO 30 I = 1,NS,INCX ++ DSDOT = DSDOT + DBLE(SX(I))*DBLE(SY(I)) ++ 30 CONTINUE ++ RETURN ++ END +--- LAPACK/BLAS/SRC/srotm.f.BAD 2005-09-28 18:00:17.000000000 -0500 ++++ LAPACK/BLAS/SRC/srotm.f 2005-09-28 18:01:34.000000000 -0500 +@@ -0,0 +1,106 @@ ++ SUBROUTINE SROTM (N,SX,INCX,SY,INCY,SPARAM) ++C ++C APPLY THE MODIFIED GIVENS TRANSFORMATION, H, TO THE 2 BY N MATRIX ++C ++C (SX**T) , WHERE **T INDICATES TRANSPOSE. THE ELEMENTS OF SX ARE IN ++C (DX**T) ++C ++C SX(LX+I*INCX), I = 0 TO N-1, WHERE LX = 1 IF INCX .GE. 0, ELSE ++C LX = (-INCX)*N, AND SIMILARLY FOR SY USING USING LY AND INCY. ++C WITH SPARAM(1)=SFLAG, H HAS ONE OF THE FOLLOWING FORMS.. 
++C ++C SFLAG=-1.E0 SFLAG=0.E0 SFLAG=1.E0 SFLAG=-2.E0 ++C ++C (SH11 SH12) (1.E0 SH12) (SH11 1.E0) (1.E0 0.E0) ++C H=( ) ( ) ( ) ( ) ++C (SH21 SH22), (SH21 1.E0), (-1.E0 SH22), (0.E0 1.E0). ++C SEE SROTMG FOR A DESCRIPTION OF DATA STORAGE IN SPARAM. ++C ++ DIMENSION SX(1),SY(1),SPARAM(5) ++ DATA ZERO,TWO/0.E0,2.E0/ ++C ++ SFLAG=SPARAM(1) ++ IF(N .LE. 0 .OR.(SFLAG+TWO.EQ.ZERO)) GO TO 140 ++ IF(.NOT.(INCX.EQ.INCY.AND. INCX .GT.0)) GO TO 70 ++C ++ NSTEPS=N*INCX ++ IF(SFLAG) 50,10,30 ++ 10 CONTINUE ++ SH12=SPARAM(4) ++ SH21=SPARAM(3) ++ DO 20 I=1,NSTEPS,INCX ++ W=SX(I) ++ Z=SY(I) ++ SX(I)=W+Z*SH12 ++ SY(I)=W*SH21+Z ++ 20 CONTINUE ++ GO TO 140 ++ 30 CONTINUE ++ SH11=SPARAM(2) ++ SH22=SPARAM(5) ++ DO 40 I=1,NSTEPS,INCX ++ W=SX(I) ++ Z=SY(I) ++ SX(I)=W*SH11+Z ++ SY(I)=-W+SH22*Z ++ 40 CONTINUE ++ GO TO 140 ++ 50 CONTINUE ++ SH11=SPARAM(2) ++ SH12=SPARAM(4) ++ SH21=SPARAM(3) ++ SH22=SPARAM(5) ++ DO 60 I=1,NSTEPS,INCX ++ W=SX(I) ++ Z=SY(I) ++ SX(I)=W*SH11+Z*SH12 ++ SY(I)=W*SH21+Z*SH22 ++ 60 CONTINUE ++ GO TO 140 ++ 70 CONTINUE ++ KX=1 ++ KY=1 ++ IF(INCX .LT. 0) KX=1+(1-N)*INCX ++ IF(INCY .LT. 
0) KY=1+(1-N)*INCY ++C ++ IF(SFLAG)120,80,100 ++ 80 CONTINUE ++ SH12=SPARAM(4) ++ SH21=SPARAM(3) ++ DO 90 I=1,N ++ W=SX(KX) ++ Z=SY(KY) ++ SX(KX)=W+Z*SH12 ++ SY(KY)=W*SH21+Z ++ KX=KX+INCX ++ KY=KY+INCY ++ 90 CONTINUE ++ GO TO 140 ++ 100 CONTINUE ++ SH11=SPARAM(2) ++ SH22=SPARAM(5) ++ DO 110 I=1,N ++ W=SX(KX) ++ Z=SY(KY) ++ SX(KX)=W*SH11+Z ++ SY(KY)=-W+SH22*Z ++ KX=KX+INCX ++ KY=KY+INCY ++ 110 CONTINUE ++ GO TO 140 ++ 120 CONTINUE ++ SH11=SPARAM(2) ++ SH12=SPARAM(4) ++ SH21=SPARAM(3) ++ SH22=SPARAM(5) ++ DO 130 I=1,N ++ W=SX(KX) ++ Z=SY(KY) ++ SX(KX)=W*SH11+Z*SH12 ++ SY(KY)=W*SH21+Z*SH22 ++ KX=KX+INCX ++ KY=KY+INCY ++ 130 CONTINUE ++ 140 CONTINUE ++ RETURN ++ END +--- LAPACK/BLAS/SRC/zdrot.f.BAD 2005-09-28 18:00:31.000000000 -0500 ++++ LAPACK/BLAS/SRC/zdrot.f 2005-09-28 18:02:00.000000000 -0500 +@@ -0,0 +1,38 @@ ++ subroutine zdrot (n,zx,incx,zy,incy,c,s) ++c ++c applies a plane rotation, where the cos and sin (c and s) are ++c double precision and the vectors zx and zy are double complex. ++c jack dongarra, linpack, 3/11/78. 
++c ++ double complex zx(1),zy(1),ztemp ++ double precision c,s ++ integer i,incx,incy,ix,iy,n ++c ++ if(n.le.0)return ++ if(incx.eq.1.and.incy.eq.1)go to 20 ++c ++c code for unequal increments or equal increments not equal ++c to 1 ++c ++ ix = 1 ++ iy = 1 ++ if(incx.lt.0)ix = (-n+1)*incx + 1 ++ if(incy.lt.0)iy = (-n+1)*incy + 1 ++ do 10 i = 1,n ++ ztemp = c*zx(ix) + s*zy(iy) ++ zy(iy) = c*zy(iy) - s*zx(ix) ++ zx(ix) = ztemp ++ ix = ix + incx ++ iy = iy + incy ++ 10 continue ++ return ++c ++c code for both increments equal to 1 ++c ++ 20 do 30 i = 1,n ++ ztemp = c*zx(i) + s*zy(i) ++ zy(i) = c*zy(i) - s*zx(i) ++ zx(i) = ztemp ++ 30 continue ++ return ++ end +--- LAPACK/BLAS/SRC/drotm.f.BAD 2005-09-28 17:59:52.000000000 -0500 ++++ LAPACK/BLAS/SRC/drotm.f 2005-09-28 18:00:50.000000000 -0500 +@@ -0,0 +1,108 @@ ++ SUBROUTINE DROTM (N,DX,INCX,DY,INCY,DPARAM) ++C ++C APPLY THE MODIFIED GIVENS TRANSFORMATION, H, TO THE 2 BY N MATRIX ++C ++C (DX**T) , WHERE **T INDICATES TRANSPOSE. THE ELEMENTS OF DX ARE IN ++C (DY**T) ++C ++C DX(LX+I*INCX), I = 0 TO N-1, WHERE LX = 1 IF INCX .GE. 0, ELSE ++C LX = (-INCX)*N, AND SIMILARLY FOR SY USING LY AND INCY. ++C WITH DPARAM(1)=DFLAG, H HAS ONE OF THE FOLLOWING FORMS.. ++C ++C DFLAG=-1.D0 DFLAG=0.D0 DFLAG=1.D0 DFLAG=-2.D0 ++C ++C (DH11 DH12) (1.D0 DH12) (DH11 1.D0) (1.D0 0.D0) ++C H=( ) ( ) ( ) ( ) ++C (DH21 DH22), (DH21 1.D0), (-1.D0 DH22), (0.D0 1.D0). ++C SEE DROTMG FOR A DESCRIPTION OF DATA STORAGE IN DPARAM. ++C ++ DOUBLE PRECISION DFLAG,DH12,DH22,DX,TWO,Z,DH11,DH21, ++ 1 DPARAM,DY,W,ZERO ++ DIMENSION DX(1),DY(1),DPARAM(5) ++ DATA ZERO,TWO/0.D0,2.D0/ ++C ++ DFLAG=DPARAM(1) ++ IF(N .LE. 0 .OR.(DFLAG+TWO.EQ.ZERO)) GO TO 140 ++ IF(.NOT.(INCX.EQ.INCY.AND. 
INCX .GT.0)) GO TO 70 ++C ++ NSTEPS=N*INCX ++ IF(DFLAG) 50,10,30 ++ 10 CONTINUE ++ DH12=DPARAM(4) ++ DH21=DPARAM(3) ++ DO 20 I=1,NSTEPS,INCX ++ W=DX(I) ++ Z=DY(I) ++ DX(I)=W+Z*DH12 ++ DY(I)=W*DH21+Z ++ 20 CONTINUE ++ GO TO 140 ++ 30 CONTINUE ++ DH11=DPARAM(2) ++ DH22=DPARAM(5) ++ DO 40 I=1,NSTEPS,INCX ++ W=DX(I) ++ Z=DY(I) ++ DX(I)=W*DH11+Z ++ DY(I)=-W+DH22*Z ++ 40 CONTINUE ++ GO TO 140 ++ 50 CONTINUE ++ DH11=DPARAM(2) ++ DH12=DPARAM(4) ++ DH21=DPARAM(3) ++ DH22=DPARAM(5) ++ DO 60 I=1,NSTEPS,INCX ++ W=DX(I) ++ Z=DY(I) ++ DX(I)=W*DH11+Z*DH12 ++ DY(I)=W*DH21+Z*DH22 ++ 60 CONTINUE ++ GO TO 140 ++ 70 CONTINUE ++ KX=1 ++ KY=1 ++ IF(INCX .LT. 0) KX=1+(1-N)*INCX ++ IF(INCY .LT. 0) KY=1+(1-N)*INCY ++C ++ IF(DFLAG)120,80,100 ++ 80 CONTINUE ++ DH12=DPARAM(4) ++ DH21=DPARAM(3) ++ DO 90 I=1,N ++ W=DX(KX) ++ Z=DY(KY) ++ DX(KX)=W+Z*DH12 ++ DY(KY)=W*DH21+Z ++ KX=KX+INCX ++ KY=KY+INCY ++ 90 CONTINUE ++ GO TO 140 ++ 100 CONTINUE ++ DH11=DPARAM(2) ++ DH22=DPARAM(5) ++ DO 110 I=1,N ++ W=DX(KX) ++ Z=DY(KY) ++ DX(KX)=W*DH11+Z ++ DY(KY)=-W+DH22*Z ++ KX=KX+INCX ++ KY=KY+INCY ++ 110 CONTINUE ++ GO TO 140 ++ 120 CONTINUE ++ DH11=DPARAM(2) ++ DH12=DPARAM(4) ++ DH21=DPARAM(3) ++ DH22=DPARAM(5) ++ DO 130 I=1,N ++ W=DX(KX) ++ Z=DY(KY) ++ DX(KX)=W*DH11+Z*DH12 ++ DY(KY)=W*DH21+Z*DH22 ++ KX=KX+INCX ++ KY=KY+INCY ++ 130 CONTINUE ++ 140 CONTINUE ++ RETURN ++ END diff --git a/blasqr.ps b/blasqr.ps new file mode 100644 index 0000000..f582cac --- /dev/null +++ b/blasqr.ps @@ -0,0 +1,3312 @@ +%!PS-Adobe-2.0 +%%Creator: dvipsk 5.58f Copyright 1986, 1994 Radical Eye Software +%%Title: blasrefcard.dvi +%%Pages: 3 +%%PageOrder: Ascend +%%Orientation: Landscape +%%BoundingBox: 0 0 612 792 +%%DocumentPaperSizes: Letter +%%EndComments +%DVIPSCommandLine: dvips -t landscape -o blasqr.ps blasrefcard.dvi +%DVIPSParameters: dpi=600, comments removed +%DVIPSSource: TeX output 1997.05.11:1256 +%%BeginProcSet: tex.pro +/TeXDict 250 dict def TeXDict begin /N{def}def /B{bind def}N /S{exch}N +/X{S N}B /TR{translate}N 
/isls false N /vsize 11 72 mul N /hsize 8.5 72 +mul N /landplus90{false}def /@rigin{isls{[0 landplus90{1 -1}{-1 1} +ifelse 0 0 0]concat}if 72 Resolution div 72 VResolution div neg scale +isls{landplus90{VResolution 72 div vsize mul 0 exch}{Resolution -72 div +hsize mul 0}ifelse TR}if Resolution VResolution vsize -72 div 1 add mul +TR[matrix currentmatrix{dup dup round sub abs 0.00001 lt{round}if} +forall round exch round exch]setmatrix}N /@landscape{/isls true N}B +/@manualfeed{statusdict /manualfeed true put}B /@copies{/#copies X}B +/FMat[1 0 0 -1 0 0]N /FBB[0 0 0 0]N /nn 0 N /IE 0 N /ctr 0 N /df-tail{ +/nn 8 dict N nn begin /FontType 3 N /FontMatrix fntrx N /FontBBox FBB N +string /base X array /BitMaps X /BuildChar{CharBuilder}N /Encoding IE N +end dup{/foo setfont}2 array copy cvx N load 0 nn put /ctr 0 N[}B /df{ +/sf 1 N /fntrx FMat N df-tail}B /dfs{div /sf X /fntrx[sf 0 0 sf neg 0 0] +N df-tail}B /E{pop nn dup definefont setfont}B /ch-width{ch-data dup +length 5 sub get}B /ch-height{ch-data dup length 4 sub get}B /ch-xoff{ +128 ch-data dup length 3 sub get sub}B /ch-yoff{ch-data dup length 2 sub +get 127 sub}B /ch-dx{ch-data dup length 1 sub get}B /ch-image{ch-data +dup type /stringtype ne{ctr get /ctr ctr 1 add N}if}B /id 0 N /rw 0 N +/rc 0 N /gp 0 N /cp 0 N /G 0 N /sf 0 N /CharBuilder{save 3 1 roll S dup +/base get 2 index get S /BitMaps get S get /ch-data X pop /ctr 0 N ch-dx +0 ch-xoff ch-yoff ch-height sub ch-xoff ch-width add ch-yoff +setcachedevice ch-width ch-height true[1 0 0 -1 -.1 ch-xoff sub ch-yoff +.1 sub]{ch-image}imagemask restore}B /D{/cc X dup type /stringtype ne{]} +if nn /base get cc ctr put nn /BitMaps get S ctr S sf 1 ne{dup dup +length 1 sub dup 2 index S get sf div put}if put /ctr ctr 1 add N}B /I{ +cc 1 add D}B /bop{userdict /bop-hook known{bop-hook}if /SI save N @rigin +0 0 moveto /V matrix currentmatrix dup 1 get dup mul exch 0 get dup mul +add .99 lt{/QV}{/RV}ifelse load def pop pop}N /eop{SI restore userdict +/eop-hook 
known{eop-hook}if showpage}N /@start{userdict /start-hook +known{start-hook}if pop /VResolution X /Resolution X 1000 div /DVImag X +/IE 256 array N 0 1 255{IE S 1 string dup 0 3 index put cvn put}for +65781.76 div /vsize X 65781.76 div /hsize X}N /p{show}N /RMat[1 0 0 -1 0 +0]N /BDot 260 string N /rulex 0 N /ruley 0 N /v{/ruley X /rulex X V}B /V +{}B /RV statusdict begin /product where{pop product dup length 7 ge{0 7 +getinterval dup(Display)eq exch 0 4 getinterval(NeXT)eq or}{pop false} +ifelse}{false}ifelse end{{gsave TR -.1 .1 TR 1 1 scale rulex ruley false +RMat{BDot}imagemask grestore}}{{gsave TR -.1 .1 TR rulex ruley scale 1 1 +false RMat{BDot}imagemask grestore}}ifelse B /QV{gsave newpath transform +round exch round exch itransform moveto rulex 0 rlineto 0 ruley neg +rlineto rulex neg 0 rlineto fill grestore}B /a{moveto}B /delta 0 N /tail +{dup /delta X 0 rmoveto}B /M{S p delta add tail}B /b{S p tail}B /c{-4 M} +B /d{-3 M}B /e{-2 M}B /f{-1 M}B /g{0 M}B /h{1 M}B /i{2 M}B /j{3 M}B /k{ +4 M}B /w{0 rmoveto}B /l{p -4 w}B /m{p -3 w}B /n{p -2 w}B /o{p -1 w}B /q{ +p 1 w}B /r{p 2 w}B /s{p 3 w}B /t{p 4 w}B /x{0 S rmoveto}B /y{3 2 roll p +a}B /bos{/SS save N}B /eos{SS restore}B end +%%EndProcSet +TeXDict begin 52099146 40258431 1000 600 600 (blasrefcard.dvi) +@start /Fa 30 122 df<07801FE03FF07FF87FF8FFFCFFFCFFFCFFFC7FF87FF83FF01F +E007800E0E798D1D>46 D<00000000FC0000000000000000FC0000000000000001FE0000 +000000000001FE0000000000000003FF0000000000000003FF0000000000000003FF0000 +000000000007FF8000000000000007FF800000000000000FFFC00000000000000FFFC000 +00000000000FFFC00000000000001FFFE00000000000001FFFE00000000000003FFFF000 +00000000003FFFF00000000000007FFFF80000000000007DFFF80000000000007CFFF800 +0000000000FCFFFC000000000000F87FFC000000000001F87FFE000000000001F03FFE00 +0000000001F03FFE000000000003F03FFF000000000003E01FFF000000000007E01FFF80 +0000000007C00FFF80000000000FC00FFFC0000000000F800FFFC0000000000F8007FFC0 
+000000001F8007FFE0000000001F0003FFE0000000003F0003FFF0000000003E0003FFF0 +000000003E0001FFF0000000007E0001FFF8000000007C0000FFF800000000FC0000FFFC +00000000F800007FFC00000001F800007FFE00000001FFFFFFFFFE00000001FFFFFFFFFE +00000003FFFFFFFFFF00000003FFFFFFFFFF00000007E000001FFF80000007C000001FFF +80000007C000000FFF8000000FC000000FFFC000000F80000007FFC000001F80000007FF +E000001F00000003FFE000001F00000003FFE000003F00000003FFF000003E00000001FF +F000007E00000001FFF800007C00000000FFF80001FF00000000FFFC00FFFFFC0000FFFF +FFFCFFFFFC0000FFFFFFFCFFFFFC0000FFFFFFFCFFFFFC0000FFFFFFFC463E7CBD4F>65 +D<00000007FFC0000E00000000FFFFF8001E00000007FFFFFF003E0000001FFFFFFFC0FE +0000007FFF003FF1FE000001FFF00007FBFE000007FFC00000FFFE00000FFF0000007FFE +00003FFC0000003FFE00007FF80000000FFE0000FFF000000007FE0001FFE000000007FE +0001FFC000000003FE0003FFC000000001FE0007FF8000000001FE0007FF8000000000FE +000FFF0000000000FE001FFF00000000007E001FFF00000000007E001FFE00000000007E +003FFE00000000003E003FFE00000000003E007FFE00000000003E007FFC00000000003E +007FFC000000000000007FFC00000000000000FFFC00000000000000FFFC000000000000 +00FFFC00000000000000FFFC00000000000000FFFC00000000000000FFFC000000000000 +00FFFC00000000000000FFFC00000000000000FFFC00000000000000FFFC000000000000 +00FFFC00000000000000FFFC000007FFFFFFFC7FFC000007FFFFFFFC7FFC000007FFFFFF +FC7FFE000007FFFFFFFC7FFE000000003FFE003FFE000000003FFE003FFE000000003FFE +001FFE000000003FFE001FFF000000003FFE001FFF000000003FFE000FFF000000003FFE +0007FF800000003FFE0007FF800000003FFE0003FFC00000003FFE0001FFE00000003FFE +0001FFE00000003FFE0000FFF00000003FFE00007FF80000003FFE00003FFE0000007FFE +00000FFF000000FFFE000007FFC00001FFFE000001FFF00003FFFE0000007FFF001FF1FE +0000001FFFFFFFC0FE00000007FFFFFF003E00000000FFFFFC000E0000000007FFC00000 +0046407ABE52>71 D76 D78 +D<0000003FFF00000000000003FFFFF000000000001FFFFFFE00000000007FF807FF8000 +000001FFC000FFE000000007FF00003FF80000000FFC00000FFC0000003FF8000007FF00 
+00007FF0000003FF800000FFE0000001FFC00001FFE0000001FFE00001FFC0000000FFE0 +0003FF800000007FF00007FF800000007FF80007FF000000003FF8000FFF000000003FFC +001FFF000000003FFE001FFE000000001FFE001FFE000000001FFE003FFE000000001FFF +003FFE000000001FFF003FFE000000001FFF007FFC000000000FFF807FFC000000000FFF +807FFC000000000FFF807FFC000000000FFF80FFFC000000000FFFC0FFFC000000000FFF +C0FFFC000000000FFFC0FFFC000000000FFFC0FFFC000000000FFFC0FFFC000000000FFF +C0FFFC000000000FFFC0FFFC000000000FFFC0FFFC000000000FFFC0FFFC000000000FFF +C0FFFC000000000FFFC0FFFC000000000FFFC0FFFC000000000FFFC07FFC000000000FFF +807FFE000000001FFF807FFE000000001FFF807FFE000000001FFF803FFE000000001FFF +003FFE000000001FFF003FFF000000003FFF001FFF000000003FFE001FFF000000003FFE +000FFF800000007FFC000FFF800000007FFC0007FFC0000000FFF80003FFC0000000FFF0 +0003FFE0000001FFF00001FFE0000001FFE00000FFF0000003FFC000007FF8000007FF80 +00003FFE00001FFF0000001FFF00003FFE00000007FFC000FFF800000001FFF807FFE000 +000000FFFFFFFFC0000000001FFFFFFE000000000003FFFFF00000000000003FFF000000 +0042407ABE4F>I82 +D<3FFFFFFFFFFFFFFC3FFFFFFFFFFFFFFC3FFFFFFFFFFFFFFC3FFFFFFFFFFFFFFC3FFC00 +3FFE003FFC3FE0003FFE0007FC7F80003FFE0001FE7F00003FFE0000FE7E00003FFE0000 +7E7E00003FFE00007E7C00003FFE00003E7C00003FFE00003E7800003FFE00001E780000 +3FFE00001E7800003FFE00001E7800003FFE00001E7800003FFE00001EF000003FFE0000 +0FF000003FFE00000FF000003FFE00000FF000003FFE00000FF000003FFE00000F000000 +3FFE0000000000003FFE0000000000003FFE0000000000003FFE0000000000003FFE0000 +000000003FFE0000000000003FFE0000000000003FFE0000000000003FFE000000000000 +3FFE0000000000003FFE0000000000003FFE0000000000003FFE0000000000003FFE0000 +000000003FFE0000000000003FFE0000000000003FFE0000000000003FFE000000000000 +3FFE0000000000003FFE0000000000003FFE0000000000003FFE0000000000003FFE0000 +000000003FFE0000000000003FFE0000000000003FFE0000000000003FFE000000000000 +3FFE0000000000003FFE0000000000003FFE0000000000003FFE0000000000003FFE0000 
+000000003FFE0000000000003FFE0000000000003FFE000000000FFFFFFFFFF800000FFF +FFFFFFF800000FFFFFFFFFF800000FFFFFFFFFF800403D7CBC49>84 +DI<001FFFC0000001FFFFFC +000007FFFFFF00000FF001FFC0000FF8007FE0001FFC001FF0001FFC001FF8001FFC000F +FC001FFC000FFC000FF80007FC000FF80007FE0007F00007FE0001C00007FE0000000007 +FE0000000007FE0000000007FE0000003FFFFE000007FFFFFE00003FFFFFFE0000FFFC07 +FE0003FFE007FE0007FF0007FE001FFE0007FE001FFC0007FE003FF80007FE007FF00007 +FE007FF00007FE00FFE00007FE00FFE00007FE00FFE00007FE00FFE00007FE00FFE0000F +FE007FE0000FFE007FF0001DFE003FF8003DFE003FFC0079FF801FFE03F0FFFE07FFFFE0 +FFFE01FFFF803FFE001FFC000FFE2F287DA733>97 D<01FE0000000000FFFE0000000000 +FFFE0000000000FFFE0000000000FFFE000000000007FE000000000003FE000000000003 +FE000000000003FE000000000003FE000000000003FE000000000003FE000000000003FE +000000000003FE000000000003FE000000000003FE000000000003FE000000000003FE00 +0000000003FE000000000003FE000000000003FE000000000003FE000000000003FE0000 +00000003FE01FF80000003FE0FFFF8000003FE3FFFFE000003FE7E01FF800003FFF8007F +C00003FFE0003FF00003FFC0001FF80003FF80000FFC0003FF000007FC0003FF000007FE +0003FF000007FF0003FF000007FF0003FF000003FF8003FF000003FF8003FF000003FF80 +03FF000003FF8003FF000003FFC003FF000003FFC003FF000003FFC003FF000003FFC003 +FF000003FFC003FF000003FFC003FF000003FFC003FF000003FFC003FF000003FF8003FF +000003FF8003FF000003FF8003FF000003FF0003FF000007FF0003FF000007FE0003FF00 +0007FE0003FF00000FFC0003FF80000FF80003FFC0001FF00003FFE0003FE00003FDF000 +FFC00003F87E03FF000003F03FFFFC000003E00FFFF0000003C003FF000000323F7CBE3A +>I<00007FFE000007FFFFC0001FFFFFF0007FE007F801FF800FF803FF001FFC07FE001F +FC0FFC001FFC0FFC001FFC1FF8000FF83FF8000FF83FF80007F07FF00001C07FF0000000 +7FF00000007FF0000000FFF0000000FFF0000000FFF0000000FFF0000000FFF0000000FF +F0000000FFF0000000FFF00000007FF00000007FF00000007FF00000007FF80000003FF8 +0000003FF800001E1FF800001E0FFC00001E0FFC00003C07FE00007C03FF0000F801FF80 
+03F0007FE01FE0001FFFFF800007FFFE0000007FF00027287DA72E>I<000000000FF000 +00000007FFF00000000007FFF00000000007FFF00000000007FFF000000000003FF00000 +0000001FF000000000001FF000000000001FF000000000001FF000000000001FF0000000 +00001FF000000000001FF000000000001FF000000000001FF000000000001FF000000000 +001FF000000000001FF000000000001FF000000000001FF000000000001FF00000000000 +1FF000000000001FF00000003FF01FF0000003FFFC1FF000000FFFFF1FF000003FF01FDF +F00000FFC003FFF00001FF0001FFF00003FE0000FFF00007FC00007FF0000FFC00003FF0 +001FF800003FF0001FF800003FF0003FF800003FF0003FF000003FF0007FF000003FF000 +7FF000003FF0007FF000003FF000FFF000003FF000FFF000003FF000FFF000003FF000FF +F000003FF000FFF000003FF000FFF000003FF000FFF000003FF000FFF000003FF0007FF0 +00003FF0007FF000003FF0007FF000003FF0007FF000003FF0003FF800003FF0003FF800 +003FF0001FF800003FF0000FF800007FF0000FFC00007FF00007FE0000FFF00003FF0003 +FFF00000FF8007FFF800007FE03FBFFFC0001FFFFE3FFFC00007FFF83FFFC000007FE03F +FFC0323F7DBE3A>I<00007FE000000007FFFE0000001FFFFF8000007FE07FE00001FF80 +1FF00003FE0007F80007FE0007FC000FFC0003FC000FF80001FE001FF80001FE003FF800 +01FF003FF80001FF007FF00000FF007FF00000FF807FF00000FF80FFF00000FF80FFF000 +00FF80FFFFFFFFFF80FFFFFFFFFF80FFFFFFFFFF80FFF000000000FFF000000000FFF000 +000000FFF0000000007FF0000000007FF0000000007FF0000000003FF0000000003FF800 +0000001FF8000007801FF8000007800FFC00000F8007FC00000F0007FE00001E0001FF00 +007E0000FF8000FC00007FF00FF000001FFFFFE0000003FFFF000000003FF8000029287D +A730>I<00000FF80000007FFF000001FFFF800007FC1FC0000FF03FE0001FE03FE0003F +C07FF0003F807FF0007F807FF0007F007FF000FF003FE000FF003FE000FF000F8000FF00 +000000FF00000000FF00000000FF00000000FF00000000FF00000000FF00000000FF0000 +0000FF00000000FF000000FFFFFFE000FFFFFFE000FFFFFFE000FFFFFFE00000FF800000 +00FF80000000FF80000000FF80000000FF80000000FF80000000FF80000000FF80000000 +FF80000000FF80000000FF80000000FF80000000FF80000000FF80000000FF80000000FF +80000000FF80000000FF80000000FF80000000FF80000000FF80000000FF80000000FF80 
+000000FF80000000FF80000000FF80000000FF80000000FF80000000FF80000000FF8000 +0000FF80000000FF8000003FFFFF80003FFFFF80003FFFFF80003FFFFF8000243F7DBE20 +>I<0000000003E00003FF801FF8001FFFF07FFC007FFFFDFCFC01FF01FFE0FE03FC007F +80FE07F8003FC0FE0FF8003FE0FC0FF0001FE07C1FF0001FF0001FF0001FF0003FF0001F +F8003FF0001FF8003FF0001FF8003FF0001FF8003FF0001FF8003FF0001FF8001FF0001F +F0001FF0001FF0000FF0001FE0000FF8003FE00007F8003FC00003FC007F800003FF01FF +000003FFFFFC0000079FFFF000000703FF8000000F00000000000F00000000000F000000 +00001F00000000001F80000000000FC0000000000FE0000000000FFFFFFF00000FFFFFFF +F00007FFFFFFFC0003FFFFFFFF0003FFFFFFFF8001FFFFFFFFC003FFFFFFFFE00FFFFFFF +FFE01FC00001FFF03F8000001FF07F0000000FF87E00000007F8FE00000003F8FE000000 +03F8FE00000003F8FE00000003F8FE00000003F87F00000007F07F8000000FF03FC00000 +1FE01FE000003FC00FF80000FF8003FF0007FE0000FFFFFFF800003FFFFFE0000001FFFC +00002F3C7DA834>I<01FE0000000000FFFE0000000000FFFE0000000000FFFE00000000 +00FFFE000000000007FE000000000003FE000000000003FE000000000003FE0000000000 +03FE000000000003FE000000000003FE000000000003FE000000000003FE000000000003 +FE000000000003FE000000000003FE000000000003FE000000000003FE000000000003FE +000000000003FE000000000003FE000000000003FE000000000003FE003FF0000003FE01 +FFFE000003FE07FFFF800003FE0FC07FC00003FE1E003FE00003FE7C003FE00003FE7800 +1FF00003FEE0001FF00003FFE0001FF80003FFC0001FF80003FF80001FF80003FF80001F +F80003FF80001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF8 +0003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF800 +03FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003 +FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF +00001FF80003FF00001FF80003FF00001FF80003FF00001FF800FFFFFC07FFFFE0FFFFFC +07FFFFE0FFFFFC07FFFFE0FFFFFC07FFFFE0333F7CBE3A>I<01E00007F8000FFC001FFE +001FFE003FFF003FFF003FFF003FFF001FFE001FFE000FFC0007F80001E0000000000000 +0000000000000000000000000000000000000000000000000000FE00FFFE00FFFE00FFFE 
+00FFFE0007FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE +0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE +0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE00FFFFF0FFFFF0FFFFF0FFFF +F014407BBF1E>I<01FE0000000000FFFE0000000000FFFE0000000000FFFE0000000000 +FFFE000000000007FE000000000003FE000000000003FE000000000003FE000000000003 +FE000000000003FE000000000003FE000000000003FE000000000003FE000000000003FE +000000000003FE000000000003FE000000000003FE000000000003FE000000000003FE00 +0000000003FE000000000003FE000000000003FE000000000003FE0007FFFC0003FE0007 +FFFC0003FE0007FFFC0003FE0007FFFC0003FE0000FF000003FE0000FC000003FE0003F8 +000003FE0007E0000003FE000FC0000003FE001F80000003FE007F00000003FE00FC0000 +0003FE01F800000003FE03F000000003FE0FE000000003FE1FE000000003FE3FF0000000 +03FE7FF000000003FEFFF800000003FFFFFC00000003FFE7FE00000003FFC3FF00000003 +FF83FF00000003FE01FF80000003FE00FFC0000003FE007FE0000003FE003FF0000003FE +003FF0000003FE001FF8000003FE000FFC000003FE0007FE000003FE0003FF000003FE00 +03FF000003FE0001FF800003FE0000FFC00003FE0000FFE000FFFFF807FFFF80FFFFF807 +FFFF80FFFFF807FFFF80FFFFF807FFFF80313F7CBE37>107 D<01FE00FFFE00FFFE00FF +FE00FFFE0007FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003 +FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003 +FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003 +FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE0003 +FE0003FE0003FE0003FE0003FE0003FE0003FE0003FE00FFFFF8FFFFF8FFFFF8FFFFF815 +3F7BBE1E>I<01FE003FF00001FF8000FFFE01FFFE000FFFF000FFFE07FFFF803FFFFC00 +FFFE0FC07FC07E03FE00FFFE1E003FE0F001FF0007FE7C003FE3E001FF0003FE78001FF3 +C000FF8003FEE0001FF70000FF8003FFE0001FFF0000FFC003FFC0001FFE0000FFC003FF +80001FFC0000FFC003FF80001FFC0000FFC003FF80001FFC0000FFC003FF00001FF80000 +FFC003FF00001FF80000FFC003FF00001FF80000FFC003FF00001FF80000FFC003FF0000 +1FF80000FFC003FF00001FF80000FFC003FF00001FF80000FFC003FF00001FF80000FFC0 
+03FF00001FF80000FFC003FF00001FF80000FFC003FF00001FF80000FFC003FF00001FF8 +0000FFC003FF00001FF80000FFC003FF00001FF80000FFC003FF00001FF80000FFC003FF +00001FF80000FFC003FF00001FF80000FFC003FF00001FF80000FFC003FF00001FF80000 +FFC003FF00001FF80000FFC003FF00001FF80000FFC003FF00001FF80000FFC003FF0000 +1FF80000FFC0FFFFFC07FFFFE03FFFFFFFFFFC07FFFFE03FFFFFFFFFFC07FFFFE03FFFFF +FFFFFC07FFFFE03FFFFF50287CA757>I<01FE003FF00000FFFE01FFFE0000FFFE07FFFF +8000FFFE0FC07FC000FFFE1E003FE00007FE7C003FE00003FE78001FF00003FEE0001FF0 +0003FFE0001FF80003FFC0001FF80003FF80001FF80003FF80001FF80003FF80001FF800 +03FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003 +FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF +00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00 +001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF0000 +1FF80003FF00001FF80003FF00001FF800FFFFFC07FFFFE0FFFFFC07FFFFE0FFFFFC07FF +FFE0FFFFFC07FFFFE033287CA73A>I<00007FF000000007FFFF0000001FFFFFC000007F +E03FF00001FF800FFC0003FE0003FE0007FC0001FF000FFC0001FF800FF80000FF801FF8 +0000FFC03FF80000FFE03FF000007FE07FF000007FF07FF000007FF07FF000007FF07FF0 +00007FF0FFF000007FF8FFF000007FF8FFF000007FF8FFF000007FF8FFF000007FF8FFF0 +00007FF8FFF000007FF8FFF000007FF8FFF000007FF87FF000007FF07FF000007FF07FF0 +00007FF03FF000007FE03FF80000FFE01FF80000FFC01FF80000FFC00FFC0001FF8007FC +0001FF0003FE0003FE0001FF800FFC00007FE03FF000003FFFFFE0000007FFFF00000000 +7FF000002D287DA734>I<01FE01FF800000FFFE0FFFF80000FFFE3FFFFE0000FFFE7E03 +FF8000FFFFF800FFC00003FFE0007FF00003FFC0003FF80003FF80001FFC0003FF00000F +FC0003FF00000FFE0003FF000007FF0003FF000007FF0003FF000007FF8003FF000007FF +8003FF000003FF8003FF000003FF8003FF000003FFC003FF000003FFC003FF000003FFC0 +03FF000003FFC003FF000003FFC003FF000003FFC003FF000003FFC003FF000003FFC003 +FF000003FF8003FF000003FF8003FF000007FF8003FF000007FF0003FF000007FF0003FF +00000FFE0003FF00000FFE0003FF00000FFC0003FF80001FF80003FFC0003FF00003FFE0 
+007FE00003FFF000FFC00003FF7E03FF000003FF3FFFFC000003FF0FFFF0000003FF03FF +00000003FF000000000003FF000000000003FF000000000003FF000000000003FF000000 +000003FF000000000003FF000000000003FF000000000003FF000000000003FF00000000 +0003FF000000000003FF000000000003FF000000000003FF0000000000FFFFFC00000000 +FFFFFC00000000FFFFFC00000000FFFFFC00000000323A7CA73A>I<01FC03F800FFFC0F +FF00FFFC1FFFC0FFFC3C1FE0FFFC783FE007FCF07FF003FDE07FF003FDC07FF003FF807F +F003FF803FE003FF803FE003FF001FC003FF00070003FF00000003FE00000003FE000000 +03FE00000003FE00000003FE00000003FE00000003FE00000003FE00000003FE00000003 +FE00000003FE00000003FE00000003FE00000003FE00000003FE00000003FE00000003FE +00000003FE00000003FE00000003FE00000003FE00000003FE000000FFFFFE0000FFFFFE +0000FFFFFE0000FFFFFE000024287CA72B>114 D<001FFC0E0001FFFFBE0007FFFFFE00 +0FE007FE001F0000FE003E00007E007C00003E007C00001E007C00001E00FC00001E00FE +00001E00FF00001E00FF80000000FFF80000007FFFE000007FFFFE00003FFFFFC0003FFF +FFF0001FFFFFF80007FFFFFC0003FFFFFE0000FFFFFF00001FFFFF8000007FFF80000003 +FFC00000007FC07000003FC0F000001FC0F000000FC0F800000FC0F800000FC0FC00000F +80FC00000F80FE00001F00FF00001F00FFC0007E00FFF801FC00FFFFFFF000F87FFFC000 +E00FFE000022287DA729>I<0007800000078000000780000007800000078000000F8000 +000F8000000F8000000F8000001F8000001F8000001F8000003F8000007F8000007F8000 +00FF800001FF800007FF80001FFFFFFEFFFFFFFEFFFFFFFEFFFFFFFE01FF800001FF8000 +01FF800001FF800001FF800001FF800001FF800001FF800001FF800001FF800001FF8000 +01FF800001FF800001FF800001FF800001FF800001FF800001FF800001FF800001FF8000 +01FF800F01FF800F01FF800F01FF800F01FF800F01FF800F01FF800F01FF800F00FF800E +00FF801E007FC01E007FE03C003FF078000FFFF80003FFE000007F80203A7EB929>I<00 +FF000007F800FFFF0007FFF800FFFF0007FFF800FFFF0007FFF800FFFF0007FFF80007FF +00003FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00 +001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF0000 +1FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001F 
+F80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF80003FF00001FF8 +0003FF00001FF80003FF00001FF80003FF00001FF80003FF00003FF80003FF00003FF800 +03FF00003FF80001FF00007FF80001FF0000FFF80000FF0001EFF80000FF8003CFFC0000 +7FE00F8FFFE0001FFFFF0FFFE00007FFFC0FFFE00000FFF00FFFE033287CA73A>II121 D E /Fb 13 118 df<0000000000000FE000000000 +00000000000000001FF00000000000000000000000001FF0000000000000000000000000 +3FF80000000000000000000000003FF80000000000000000000000003FF8000000000000 +0000000000007FFC0000000000000000000000007FFC000000000000000000000000FFFE +000000000000000000000000FFFE000000000000000000000000FFFE0000000000000000 +00000001FFFF000000000000000000000001FFFF000000000000000000000003FFFF8000 +00000000000000000003FFFF800000000000000000000007FFFFC0000000000000000000 +0007FFFFC00000000000000000000007FFFFC0000000000000000000000FFFFFE0000000 +000000000000000FFFFFE0000000000000000000001FFFFFF0000000000000000000001F +FFFFF0000000000000000000001FFFFFF0000000000000000000003FFFFFF80000000000 +00000000003FFFFFF8000000000000000000007FBFFFFC000000000000000000007FBFFF +FC000000000000000000007F1FFFFC00000000000000000000FF1FFFFE00000000000000 +000000FE1FFFFE00000000000000000001FE0FFFFF00000000000000000001FE0FFFFF00 +000000000000000001FC07FFFF00000000000000000003FC07FFFF800000000000000000 +03F807FFFF80000000000000000007F803FFFFC0000000000000000007F803FFFFC00000 +00000000000007F001FFFFC000000000000000000FF001FFFFE000000000000000000FE0 +01FFFFE000000000000000001FE000FFFFF000000000000000001FE000FFFFF000000000 +000000001FC0007FFFF000000000000000003FC0007FFFF800000000000000003F80007F +FFF800000000000000007F80003FFFFC00000000000000007F80003FFFFC000000000000 +00007F00001FFFFC0000000000000000FF00001FFFFE0000000000000000FE00001FFFFE +0000000000000001FE00000FFFFF0000000000000001FE00000FFFFF0000000000000001 +FC000007FFFF0000000000000003FC000007FFFF8000000000000003F8000007FFFF8000 +000000000007F8000003FFFFC000000000000007F8000003FFFFC000000000000007F000 
+0001FFFFC00000000000000FF0000001FFFFE00000000000000FE0000001FFFFE0000000 +0000001FE0000000FFFFF00000000000001FE0000000FFFFF00000000000001FC0000000 +7FFFF00000000000003FC00000007FFFF80000000000003F800000007FFFF80000000000 +007F800000003FFFFC0000000000007FFFFFFFFFFFFFFC0000000000007FFFFFFFFFFFFF +FC000000000000FFFFFFFFFFFFFFFE000000000000FFFFFFFFFFFFFFFE000000000001FF +FFFFFFFFFFFFFF000000000001FFFFFFFFFFFFFFFF000000000001FC0000000007FFFF00 +0000000003FC0000000007FFFF800000000003F80000000007FFFF800000000007F80000 +000003FFFFC00000000007F00000000003FFFFC00000000007F00000000001FFFFC00000 +00000FF00000000001FFFFE0000000000FE00000000000FFFFE0000000001FE000000000 +00FFFFF0000000001FC00000000000FFFFF0000000001FC000000000007FFFF000000000 +3FC000000000007FFFF8000000003F8000000000003FFFF8000000007F8000000000003F +FFFC000000007F0000000000003FFFFC000000007F0000000000001FFFFC00000000FF00 +00000000001FFFFE00000000FE0000000000000FFFFE00000001FE0000000000000FFFFF +00000001FC0000000000000FFFFF00000003FE00000000000007FFFF8000001FFFC00000 +00000007FFFF8000FFFFFFFFE0000000FFFFFFFFFFFEFFFFFFFFE0000000FFFFFFFFFFFE +FFFFFFFFE0000000FFFFFFFFFFFEFFFFFFFFE0000000FFFFFFFFFFFEFFFFFFFFE0000000 +FFFFFFFFFFFEFFFFFFFFE0000000FFFFFFFFFFFE6F647BE37A>65 +D<000000000001FFFF00000003C00000000000007FFFFFF0000007C0000000000007FFFF +FFFE00000FC000000000007FFFFFFFFFC0001FC00000000001FFFFFFFFFFF0003FC00000 +00000FFFFFFFFFFFFC007FC0000000003FFFFFF8007FFE00FFC000000000FFFFFF000007 +FF81FFC000000001FFFFF0000001FFC3FFC000000007FFFFC00000007FE7FFC00000000F +FFFF000000001FFFFFC00000003FFFFC0000000007FFFFC00000007FFFF00000000003FF +FFC0000000FFFFE00000000001FFFFC0000001FFFF800000000000FFFFC0000003FFFF00 +00000000007FFFC0000007FFFE0000000000003FFFC000000FFFFC0000000000001FFFC0 +00001FFFF80000000000000FFFC000003FFFF800000000000007FFC000003FFFF0000000 +00000007FFC000007FFFE000000000000003FFC00000FFFFE000000000000003FFC00001 +FFFFC000000000000001FFC00001FFFFC000000000000001FFC00003FFFF800000000000 
+0000FFC00003FFFF8000000000000000FFC00007FFFF00000000000000007FC00007FFFF +00000000000000007FC0000FFFFF00000000000000007FC0000FFFFE0000000000000000 +3FC0001FFFFE00000000000000003FC0001FFFFE00000000000000003FC0001FFFFE0000 +0000000000003FC0003FFFFC00000000000000001FC0003FFFFC00000000000000001FC0 +003FFFFC00000000000000001FC0003FFFFC00000000000000001FC0007FFFFC00000000 +000000000000007FFFFC00000000000000000000007FFFF800000000000000000000007F +FFF800000000000000000000007FFFF80000000000000000000000FFFFF8000000000000 +0000000000FFFFF80000000000000000000000FFFFF80000000000000000000000FFFFF8 +0000000000000000000000FFFFF80000000000000000000000FFFFF80000000000000000 +000000FFFFF80000000000000000000000FFFFF80000000000000000000000FFFFF80000 +000000000000000000FFFFF80000000000000000000000FFFFF800000000000000000000 +00FFFFF80000000000000000000000FFFFF80000000000000000000000FFFFF800000000 +00000000000000FFFFF80000000000000000000000FFFFF800000000000000000000007F +FFF800000000000000000000007FFFF8000000001FFFFFFFFFFFF87FFFFC000000001FFF +FFFFFFFFF87FFFFC000000001FFFFFFFFFFFF87FFFFC000000001FFFFFFFFFFFF83FFFFC +000000001FFFFFFFFFFFF83FFFFC000000001FFFFFFFFFFFF83FFFFC00000000000001FF +FFC0003FFFFC00000000000001FFFFC0001FFFFE00000000000001FFFFC0001FFFFE0000 +0000000001FFFFC0001FFFFE00000000000001FFFFC0000FFFFE00000000000001FFFFC0 +000FFFFF00000000000001FFFFC00007FFFF00000000000001FFFFC00007FFFF00000000 +000001FFFFC00003FFFF80000000000001FFFFC00003FFFF80000000000001FFFFC00001 +FFFFC0000000000001FFFFC00001FFFFC0000000000001FFFFC00000FFFFE00000000000 +01FFFFC000007FFFE0000000000001FFFFC000003FFFF0000000000001FFFFC000003FFF +F8000000000001FFFFC000001FFFFC000000000001FFFFC000000FFFFC000000000001FF +FFC0000007FFFE000000000001FFFFC0000003FFFF000000000003FFFFC0000001FFFFC0 +0000000003FFFFC0000000FFFFE00000000007FFFFC00000007FFFF00000000007FFFFC0 +0000003FFFFC000000000FFFFFC00000000FFFFF000000001FFFFFC000000007FFFFC000 +00007FEFFFC000000001FFFFF8000000FFC7FFC000000000FFFFFF000007FF83FFC00000 
+00003FFFFFFC003FFF01FFC0000000000FFFFFFFFFFFFC007FC00000000001FFFFFFFFFF +F8001FC000000000007FFFFFFFFFE00007C0000000000007FFFFFFFF800001C000000000 +00007FFFFFF80000000000000000000001FFFF0000000000006D6677E37F>71 +D<00000000001FFFE000000000000000000007FFFFFF8000000000000000003FFFFFFFF0 +0000000000000001FFFFFFFFFE000000000000000FFFFFFFFFFFC00000000000003FFFFC +00FFFFF0000000000000FFFFC0000FFFFC000000000003FFFE000001FFFF000000000007 +FFF80000007FFF80000000001FFFF00000003FFFE0000000003FFFC00000000FFFF00000 +00007FFF8000000007FFF800000000FFFF0000000003FFFC00000003FFFE0000000001FF +FF00000007FFFC0000000000FFFF80000007FFF800000000007FFF8000000FFFF0000000 +00003FFFC000001FFFF000000000003FFFE000003FFFE000000000001FFFF000007FFFC0 +00000000000FFFF800007FFFC000000000000FFFF80000FFFF80000000000007FFFC0001 +FFFF80000000000007FFFE0001FFFF00000000000003FFFE0003FFFF00000000000003FF +FF0003FFFE00000000000001FFFF0007FFFE00000000000001FFFF8007FFFE0000000000 +0001FFFF800FFFFE00000000000001FFFFC00FFFFC00000000000000FFFFC00FFFFC0000 +0000000000FFFFC01FFFFC00000000000000FFFFE01FFFFC00000000000000FFFFE01FFF +F8000000000000007FFFE03FFFF8000000000000007FFFF03FFFF8000000000000007FFF +F03FFFF8000000000000007FFFF03FFFF8000000000000007FFFF07FFFF8000000000000 +007FFFF87FFFF0000000000000003FFFF87FFFF0000000000000003FFFF87FFFF0000000 +000000003FFFF87FFFF0000000000000003FFFF8FFFFF0000000000000003FFFFCFFFFF0 +000000000000003FFFFCFFFFF0000000000000003FFFFCFFFFF0000000000000003FFFFC +FFFFF0000000000000003FFFFCFFFFF0000000000000003FFFFCFFFFF000000000000000 +3FFFFCFFFFF0000000000000003FFFFCFFFFF0000000000000003FFFFCFFFFF000000000 +0000003FFFFCFFFFF0000000000000003FFFFCFFFFF0000000000000003FFFFCFFFFF000 +0000000000003FFFFCFFFFF0000000000000003FFFFCFFFFF0000000000000003FFFFCFF +FFF0000000000000003FFFFC7FFFF0000000000000003FFFF87FFFF0000000000000003F +FFF87FFFF0000000000000003FFFF87FFFF0000000000000003FFFF87FFFF80000000000 +00007FFFF87FFFF8000000000000007FFFF83FFFF8000000000000007FFFF03FFFF80000 
+00000000007FFFF03FFFF8000000000000007FFFF03FFFF8000000000000007FFFF01FFF +F8000000000000007FFFE01FFFFC00000000000000FFFFE01FFFFC00000000000000FFFF +E00FFFFC00000000000000FFFFC00FFFFC00000000000000FFFFC007FFFE000000000000 +01FFFF8007FFFE00000000000001FFFF8003FFFE00000000000001FFFF0003FFFF000000 +00000003FFFF0001FFFF000001FE000003FFFE0001FFFF80000FFFC00007FFFE0000FFFF +80003FFFE00007FFFC0000FFFFC0007FFFF8000FFFFC00007FFFC000FFFFFC000FFFF800 +003FFFE001FF03FE001FFFF000001FFFE003F8007F001FFFE000001FFFF003F0003F803F +FFE000000FFFF807E0001FC07FFFC0000007FFFC07E0000FE0FFFF80000003FFFE07C000 +0FE1FFFF00000001FFFF07C00007F3FFFE00000000FFFF87C00003FFFFFC000000003FFF +C7C00003FFFFF0000000001FFFE7E00003FFFFE0000000000FFFFFE00001FFFFC0000000 +0003FFFFF00001FFFF000000000000FFFFF8000FFFFC0000000000003FFFFF00FFFFF000 +00000000000FFFFFFFFFFFC000000000000003FFFFFFFFFFC000000C000000007FFFFFFF +FFC000001E0000000007FFFFFFFFE000001E00000000001FFFE07FF000001E0000000000 +0000007FF800003E00000000000000007FFC00007E00000000000000003FFE0000FE0000 +0000000000003FFF0001FE00000000000000003FFFC01FFE00000000000000003FFFFFFF +FE00000000000000001FFFFFFFFC00000000000000001FFFFFFFFC00000000000000001F +FFFFFFFC00000000000000001FFFFFFFFC00000000000000000FFFFFFFFC000000000000 +00000FFFFFFFF800000000000000000FFFFFFFF8000000000000000007FFFFFFF8000000 +000000000007FFFFFFF0000000000000000007FFFFFFF0000000000000000003FFFFFFE0 +000000000000000003FFFFFFE0000000000000000001FFFFFFC0000000000000000000FF +FFFF80000000000000000000FFFFFF000000000000000000007FFFFE0000000000000000 +00003FFFFC000000000000000000000FFFF80000000000000000000007FFE00000000000 +000000000000FF8000678077E379>81 DI<0000000FFFF00000000001FFFFFF800000000FFFFFFFF000 +00003FFFFFFFFC000000FFFFFFFFFE000003FFFE001FFF000007FFF80001FF80000FFFE0 +0007FFC0001FFF80000FFFE0007FFF00000FFFE000FFFE00001FFFF000FFFE00001FFFF0 +01FFFC00001FFFF003FFF800001FFFF007FFF800001FFFF007FFF000001FFFF00FFFF000 
+001FFFF00FFFF000000FFFE01FFFE000000FFFE01FFFE0000007FFC03FFFE0000003FF80 +3FFFE0000000FE003FFFE000000000007FFFC000000000007FFFC000000000007FFFC000 +000000007FFFC00000000000FFFFC00000000000FFFFC00000000000FFFFC00000000000 +FFFFC00000000000FFFFC00000000000FFFFC00000000000FFFFC00000000000FFFFC000 +00000000FFFFC00000000000FFFFC00000000000FFFFC00000000000FFFFC00000000000 +FFFFC00000000000FFFFC000000000007FFFC000000000007FFFC000000000007FFFE000 +000000007FFFE000000000003FFFE000000000003FFFE000000000003FFFF00000000000 +1FFFF000000000FC1FFFF000000000FC0FFFF800000001FC0FFFF800000001F807FFF800 +000001F803FFFC00000003F803FFFE00000007F001FFFE00000007E000FFFF0000000FE0 +007FFF8000001FC0003FFFE000007F80001FFFF00000FF000007FFFC0003FE000003FFFF +C03FFC000000FFFFFFFFF00000003FFFFFFFE00000000FFFFFFF8000000001FFFFFC0000 +0000000FFFC000003E437BC148>99 D<000000000000007FF80000000000000FFFFFF800 +00000000000FFFFFF80000000000000FFFFFF80000000000000FFFFFF80000000000000F +FFFFF80000000000000FFFFFF8000000000000000FFFF80000000000000007FFF8000000 +0000000003FFF80000000000000003FFF80000000000000003FFF80000000000000003FF +F80000000000000003FFF80000000000000003FFF80000000000000003FFF80000000000 +000003FFF80000000000000003FFF80000000000000003FFF80000000000000003FFF800 +00000000000003FFF80000000000000003FFF80000000000000003FFF800000000000000 +03FFF80000000000000003FFF80000000000000003FFF80000000000000003FFF8000000 +0000000003FFF80000000000000003FFF80000000000000003FFF80000000000000003FF +F80000000000000003FFF80000000000000003FFF80000000000000003FFF80000000000 +000003FFF8000000000FFF8003FFF800000001FFFFF803FFF80000000FFFFFFE03FFF800 +00003FFFFFFF83FFF8000000FFFFFFFFE3FFF8000003FFFF807FF3FFF8000007FFF8000F +FBFFF800001FFFE00003FFFFF800003FFFC00000FFFFF800007FFF0000007FFFF80000FF +FE0000003FFFF80001FFFC0000001FFFF80003FFFC0000000FFFF80003FFF800000007FF +F80007FFF800000007FFF8000FFFF000000007FFF8000FFFF000000007FFF8001FFFE000 +000007FFF8001FFFE000000007FFF8003FFFE000000007FFF8003FFFE000000007FFF800 
+3FFFE000000007FFF8007FFFC000000007FFF8007FFFC000000007FFF8007FFFC0000000 +07FFF8007FFFC000000007FFF800FFFFC000000007FFF800FFFFC000000007FFF800FFFF +C000000007FFF800FFFFC000000007FFF800FFFFC000000007FFF800FFFFC000000007FF +F800FFFFC000000007FFF800FFFFC000000007FFF800FFFFC000000007FFF800FFFFC000 +000007FFF800FFFFC000000007FFF800FFFFC000000007FFF800FFFFC000000007FFF800 +FFFFC000000007FFF8007FFFC000000007FFF8007FFFC000000007FFF8007FFFC0000000 +07FFF8007FFFC000000007FFF8003FFFE000000007FFF8003FFFE000000007FFF8003FFF +E000000007FFF8001FFFE000000007FFF8001FFFE000000007FFF8000FFFF000000007FF +F8000FFFF000000007FFF80007FFF00000000FFFF80007FFF80000000FFFF80003FFF800 +00001FFFF80001FFFC0000003FFFF80000FFFE0000007FFFF800007FFE000000FFFFF800 +003FFF800003FFFFFC00001FFFC00007FFFFFE00000FFFF0001FF7FFFFFE0007FFFE01FF +E7FFFFFE0001FFFFFFFF87FFFFFE00007FFFFFFE07FFFFFE00001FFFFFF807FFFFFE0000 +03FFFFE007FFFFFE0000003FFE0007FFC0004F657BE35A>I<0000000FFFC00000000000 +01FFFFFC00000000000FFFFFFF80000000003FFFFFFFE000000000FFFFFFFFF800000003 +FFFE03FFFC00000007FFF0007FFF0000001FFFC0001FFF8000003FFF80000FFFC000007F +FF000007FFE00000FFFE000003FFF00001FFFC000001FFF00003FFFC000001FFF80003FF +F8000000FFF80007FFF8000000FFFC000FFFF0000000FFFC000FFFF00000007FFE001FFF +F00000007FFE001FFFE00000007FFE003FFFE00000003FFF003FFFE00000003FFF003FFF +E00000003FFF007FFFE00000003FFF007FFFC00000003FFF007FFFC00000001FFF807FFF +C00000001FFF80FFFFC00000001FFF80FFFFC00000001FFF80FFFFC00000001FFF80FFFF +FFFFFFFFFFFF80FFFFFFFFFFFFFFFF80FFFFFFFFFFFFFFFF80FFFFFFFFFFFFFFFF80FFFF +FFFFFFFFFFFF00FFFFC0000000000000FFFFC0000000000000FFFFC0000000000000FFFF +C0000000000000FFFFC0000000000000FFFFC00000000000007FFFC00000000000007FFF +C00000000000007FFFC00000000000007FFFE00000000000007FFFE00000000000003FFF +E00000000000003FFFE00000000000001FFFE00000000000001FFFF0000000000F001FFF +F0000000001F800FFFF0000000003F8007FFF8000000003F8007FFF8000000007F0003FF +FC000000007F0001FFFE00000000FE0000FFFE00000001FE00007FFF00000003FC00003F 
+FF80000007F800001FFFE000000FF000000FFFF000003FE0000003FFFC0001FFC0000001 +FFFFC01FFF000000007FFFFFFFFE000000001FFFFFFFF80000000007FFFFFFE000000000 +00FFFFFF00000000000007FFF000000041437CC14A>I<000000003FFC0000000007FFFF +800000003FFFFFC0000001FFFFFFF0000007FFFFFFF800000FFFF03FFC00001FFF807FFC +00007FFE00FFFE0000FFFC00FFFE0000FFF801FFFF0001FFF001FFFF0003FFF001FFFF00 +07FFE001FFFF0007FFE001FFFF000FFFC001FFFF000FFFC001FFFF000FFFC000FFFE001F +FF80007FFC001FFF80003FF8001FFF80001FF0001FFF800007C0001FFF80000000001FFF +80000000001FFF80000000001FFF80000000001FFF80000000001FFF80000000001FFF80 +000000001FFF80000000001FFF80000000001FFF80000000001FFF80000000001FFF8000 +0000001FFF80000000001FFF80000000001FFF80000000001FFF80000000FFFFFFFFFFC0 +00FFFFFFFFFFC000FFFFFFFFFFC000FFFFFFFFFFC000FFFFFFFFFFC000FFFFFFFFFFC000 +001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC000000000 +1FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001F +FFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFF +C0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0 +000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC000 +0000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC00000 +00001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000 +001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC000000000 +1FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001F +FFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFFC0000000001FFF +C00000007FFFFFFFFC00007FFFFFFFFC00007FFFFFFFFC00007FFFFFFFFC00007FFFFFFF +FC00007FFFFFFFFC000038657CE431>I<000FE00000003FF80000007FFC000001FFFF00 +0001FFFF000003FFFF800003FFFF800007FFFFC00007FFFFC00007FFFFC00007FFFFC000 +07FFFFC00007FFFFC00007FFFFC00003FFFF800003FFFF800001FFFF000001FFFF000000 +7FFC0000003FF80000000FE0000000000000000000000000000000000000000000000000 +000000000000000000000000000000000000000000000000000000000000000000000000 
+00000000000000000000000000000000000007FF80007FFFFF80007FFFFF80007FFFFF80 +007FFFFF80007FFFFF80007FFFFF800000FFFF8000007FFF8000003FFF8000003FFF8000 +003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF800000 +3FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003F +FF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF +8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF80 +00003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000 +003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF8000003FFF800000 +3FFF8000003FFF8000003FFF8000003FFF8000003FFF8000FFFFFFFFC0FFFFFFFFC0FFFF +FFFFC0FFFFFFFFC0FFFFFFFFC0FFFFFFFFC022657BE42C>105 D<0007FF800000000000 +00FFFFFF80000000000000FFFFFF80000000000000FFFFFF80000000000000FFFFFF8000 +0000000000FFFFFF80000000000000FFFFFF8000000000000000FFFF8000000000000000 +7FFF80000000000000003FFF80000000000000003FFF80000000000000003FFF80000000 +000000003FFF80000000000000003FFF80000000000000003FFF80000000000000003FFF +80000000000000003FFF80000000000000003FFF80000000000000003FFF800000000000 +00003FFF80000000000000003FFF80000000000000003FFF80000000000000003FFF8000 +0000000000003FFF80000000000000003FFF80000000000000003FFF8000000000000000 +3FFF80000000000000003FFF80000000000000003FFF80000000000000003FFF80000000 +000000003FFF80000000000000003FFF80000000000000003FFF80000000000000003FFF +80000000000000003FFF80000000000000003FFF80000000000000003FFF800003FFFFFF +80003FFF800003FFFFFF80003FFF800003FFFFFF80003FFF800003FFFFFF80003FFF8000 +03FFFFFF80003FFF800003FFFFFF80003FFF8000003FFF8000003FFF8000000FFC000000 +3FFF8000001FF00000003FFF8000003FE00000003FFF800000FFC00000003FFF800001FF +800000003FFF800003FF000000003FFF800007FC000000003FFF80000FF8000000003FFF +80003FF0000000003FFF80007FE0000000003FFF8000FFC0000000003FFF8001FF000000 +00003FFF8003FE00000000003FFF800FFC00000000003FFF801FF800000000003FFF803F +F000000000003FFF807FC000000000003FFF80FFE000000000003FFF83FFF00000000000 
+3FFF87FFF800000000003FFF8FFFF800000000003FFF9FFFFC00000000003FFFBFFFFE00 +000000003FFFFFFFFE00000000003FFFFFFFFF00000000003FFFFFFFFF80000000003FFF +FCFFFFC0000000003FFFF87FFFC0000000003FFFF07FFFE0000000003FFFE03FFFF00000 +00003FFFC01FFFF8000000003FFF801FFFF8000000003FFF800FFFFC000000003FFF8007 +FFFE000000003FFF8003FFFF000000003FFF8003FFFF000000003FFF8001FFFF80000000 +3FFF8000FFFFC00000003FFF80007FFFE00000003FFF80007FFFE00000003FFF80003FFF +F00000003FFF80001FFFF80000003FFF80000FFFFC0000003FFF80000FFFFC0000003FFF +800007FFFE0000003FFF800003FFFF0000003FFF800001FFFF0000003FFF800001FFFF80 +00003FFF800000FFFFC000003FFF8000007FFFE000003FFF800000FFFFF000FFFFFFFFE0 +0FFFFFFFF0FFFFFFFFE00FFFFFFFF0FFFFFFFFE00FFFFFFFF0FFFFFFFFE00FFFFFFFF0FF +FFFFFFE00FFFFFFFF0FFFFFFFFE00FFFFFFFF04C647BE355>107 +D<0007FF80001FFF00000000FFFFFF8000FFFFF0000000FFFFFF8007FFFFFC000000FFFF +FF801FFFFFFF000000FFFFFF803FFFFFFF800000FFFFFF807FE07FFFC00000FFFFFF81FE +001FFFE0000000FFFF83F8000FFFF00000007FFF87F00007FFF00000003FFF8FC00007FF +F80000003FFF8F800003FFF80000003FFF9F000003FFFC0000003FFFBE000003FFFC0000 +003FFFBC000001FFFC0000003FFFFC000001FFFE0000003FFFF8000001FFFE0000003FFF +F8000001FFFE0000003FFFF0000001FFFE0000003FFFF0000001FFFE0000003FFFE00000 +01FFFE0000003FFFE0000001FFFE0000003FFFE0000001FFFE0000003FFFE0000001FFFE +0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE000000 +3FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0 +000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001 +FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE00 +00003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003F +FFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC000 +0001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FF +FE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000 +003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFF 
+C0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC00000 +01FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE +0000FFFFFFFFF007FFFFFFFF80FFFFFFFFF007FFFFFFFF80FFFFFFFFF007FFFFFFFF80FF +FFFFFFF007FFFFFFFF80FFFFFFFFF007FFFFFFFF80FFFFFFFFF007FFFFFFFF8051417BC0 +5A>110 D<000FFF000FFC00FFFFFF003FFF80FFFFFF00FFFFE0FFFFFF01FFFFF0FFFFFF +03FFFFF8FFFFFF07FC7FFCFFFFFF0FE0FFFE00FFFF1F80FFFE007FFF3F01FFFF003FFF3E +01FFFF003FFF7E01FFFF003FFF7C01FFFF003FFFF801FFFF003FFFF801FFFF003FFFF001 +FFFF003FFFF000FFFE003FFFF000FFFE003FFFE0007FFC003FFFE0003FF8003FFFE0000F +E0003FFFC0000000003FFFC0000000003FFFC0000000003FFFC0000000003FFFC0000000 +003FFF80000000003FFF80000000003FFF80000000003FFF80000000003FFF8000000000 +3FFF80000000003FFF80000000003FFF80000000003FFF80000000003FFF80000000003F +FF80000000003FFF80000000003FFF80000000003FFF80000000003FFF80000000003FFF +80000000003FFF80000000003FFF80000000003FFF80000000003FFF80000000003FFF80 +000000003FFF80000000003FFF80000000003FFF80000000003FFF80000000003FFF8000 +0000003FFF80000000003FFF80000000003FFF80000000003FFF80000000003FFF800000 +00003FFF80000000003FFF80000000003FFF80000000FFFFFFFFF80000FFFFFFFFF80000 +FFFFFFFFF80000FFFFFFFFF80000FFFFFFFFF80000FFFFFFFFF8000038417BC042>114 +D<0007FFC00000003FFE0000FFFFFFC00007FFFFFE0000FFFFFFC00007FFFFFE0000FFFF +FFC00007FFFFFE0000FFFFFFC00007FFFFFE0000FFFFFFC00007FFFFFE0000FFFFFFC000 +07FFFFFE000000FFFFC0000007FFFE0000007FFFC0000003FFFE0000003FFFC0000001FF +FE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000 +003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFF +C0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC00000 +01FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE +0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE000000 +3FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0 +000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001 
+FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE00 +00003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003F +FFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC000 +0001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FFFE0000003FFFC0000001FF +FE0000003FFFC0000001FFFE0000003FFFC0000003FFFE0000003FFFC0000003FFFE0000 +003FFFC0000003FFFE0000003FFFC0000007FFFE0000003FFFC0000007FFFE0000003FFF +C000000FFFFE0000001FFFC000000FFFFE0000001FFFC000001FFFFE0000001FFFC00000 +3EFFFE0000000FFFE000007EFFFE0000000FFFE00000FCFFFF00000007FFF00001F8FFFF +80000003FFF80007F0FFFFFF800001FFFF003FE0FFFFFF800000FFFFFFFFC0FFFFFF8000 +007FFFFFFF00FFFFFF8000001FFFFFFE00FFFFFF80000003FFFFF800FFFFFF800000003F +FF8000FFF0000051427BC05A>117 D E /Fc 18 118 df[<0000000000000000001FF000 +00000000000000000000000000000000003FF80000000000000000000000000000000000 +007FFC0000000000000000000000000000000000007FFC00000000000000000000000000 +0000000000FFFE000000000000000000000000000000000000FFFE000000000000000000 +000000000000000000FFFE000000000000000000000000000000000001FFFF0000000000 +00000000000000000000000001FFFF000000000000000000000000000000000003FFFF80 +0000000000000000000000000000000003FFFF8000000000000000000000000000000000 +07FFFFC00000000000000000000000000000000007FFFFC0000000000000000000000000 +0000000007FFFFC0000000000000000000000000000000000FFFFFE00000000000000000 +00000000000000000FFFFFE0000000000000000000000000000000001FFFFFF000000000 +0000000000000000000000001FFFFFF0000000000000000000000000000000001FFFFFF0 +000000000000000000000000000000003FFFFFF800000000000000000000000000000000 +3FFFFFF8000000000000000000000000000000007FFFFFFC000000000000000000000000 +000000007FFFFFFC000000000000000000000000000000007FFFFFFC0000000000000000 +0000000000000000FFFFFFFE00000000000000000000000000000000FFFFFFFE00000000 +000000000000000000000001FFFFFFFF00000000000000000000000000000001FFFFFFFF +00000000000000000000000000000001FFFFFFFF00000000000000000000000000000003 
+FFFFFFFF80000000000000000000000000000003FFFFFFFF800000000000000000000000 +00000007FFFFFFFFC0000000000000000000000000000007FFFFFFFFC000000000000000 +0000000000000007FFFFFFFFC000000000000000000000000000000FFFFFFFFFE0000000 +00000000000000000000000FFFFFFFFFE000000000000000000000000000001FFFFFFFFF +F000000000000000000000000000001FFDFFFFFFF000000000000000000000000000001F +F9FFFFFFF000000000000000000000000000003FF8FFFFFFF80000000000000000000000 +0000003FF8FFFFFFF800000000000000000000000000007FF0FFFFFFFC00000000000000 +000000000000007FF07FFFFFFC00000000000000000000000000007FE07FFFFFFC000000 +0000000000000000000000FFE03FFFFFFE0000000000000000000000000000FFE03FFFFF +FE0000000000000000000000000001FFC03FFFFFFF0000000000000000000000000001FF +C01FFFFFFF0000000000000000000000000001FF801FFFFFFF0000000000000000000000 +000003FF800FFFFFFF8000000000000000000000000003FF800FFFFFFF80000000000000 +00000000000007FF000FFFFFFFC000000000000000000000000007FF0007FFFFFFC00000 +0000000000000000000007FE0007FFFFFFC00000000000000000000000000FFE0003FFFF +FFE00000000000000000000000000FFE0003FFFFFFE00000000000000000000000001FFC +0003FFFFFFF00000000000000000000000001FFC0001FFFFFFF000000000000000000000 +00001FF80001FFFFFFF00000000000000000000000003FF80000FFFFFFF8000000000000 +0000000000003FF80000FFFFFFF80000000000000000000000007FF00000FFFFFFFC0000 +000000000000000000007FF000007FFFFFFC0000000000000000000000007FE000007FFF +FFFC000000000000000000000000FFE000003FFFFFFE000000000000000000000000FFE0 +00003FFFFFFE000000000000000000000001FFC000003FFFFFFF00000000000000000000 +0001FFC000001FFFFFFF000000000000000000000001FF8000001FFFFFFF000000000000 +000000000003FF8000000FFFFFFF800000000000000000000003FF8000000FFFFFFF8000 +00000000000000000007FF0000000FFFFFFFC00000000000000000000007FF00000007FF +FFFFC00000000000000000000007FE00000007FFFFFFC0000000000000000000000FFE00 +000003FFFFFFE0000000000000000000000FFE00000003FFFFFFE0000000000000000000 +001FFC00000003FFFFFFF0000000000000000000001FFC00000001FFFFFFF00000000000 
+00000000001FF800000001FFFFFFF0000000000000000000003FF800000000FFFFFFF800 +0000000000000000003FF800000000FFFFFFF8000000000000000000007FF000000000FF +FFFFFC000000000000000000007FF0000000007FFFFFFC000000000000000000007FE000 +0000007FFFFFFC00000000000000000000FFE0000000003FFFFFFE000000000000000000 +00FFE0000000003FFFFFFE00000000000000000001FFC0000000003FFFFFFF0000000000 +0000000001FFC0000000001FFFFFFF00000000000000000001FF80000000001FFFFFFF00 +000000000000000003FF80000000000FFFFFFF80000000000000000003FF80000000000F +FFFFFF80000000000000000007FF00000000000FFFFFFFC0000000000000000007FF0000 +00000007FFFFFFC0000000000000000007FE000000000007FFFFFFC00000000000000000 +0FFE000000000003FFFFFFE000000000000000000FFFFFFFFFFFFFFFFFFFFFE000000000 +000000001FFFFFFFFFFFFFFFFFFFFFF000000000000000001FFFFFFFFFFFFFFFFFFFFFF0 +00000000000000001FFFFFFFFFFFFFFFFFFFFFF000000000000000003FFFFFFFFFFFFFFF +FFFFFFF800000000000000003FFFFFFFFFFFFFFFFFFFFFF800000000000000007FFFFFFF +FFFFFFFFFFFFFFFC00000000000000007FFFFFFFFFFFFFFFFFFFFFFC0000000000000000 +7FFFFFFFFFFFFFFFFFFFFFFC0000000000000000FFE00000000000003FFFFFFE00000000 +00000000FFE00000000000003FFFFFFE0000000000000001FFC00000000000003FFFFFFF +0000000000000001FFC00000000000001FFFFFFF0000000000000001FF80000000000000 +1FFFFFFF0000000000000003FF800000000000000FFFFFFF8000000000000003FF800000 +000000000FFFFFFF8000000000000007FF000000000000000FFFFFFFC000000000000007 +FF0000000000000007FFFFFFC000000000000007FE0000000000000007FFFFFFC0000000 +0000000FFE0000000000000003FFFFFFE00000000000000FFE0000000000000003FFFFFF +E00000000000001FFC0000000000000003FFFFFFF00000000000001FFC00000000000000 +01FFFFFFF00000000000001FF80000000000000001FFFFFFF00000000000003FF8000000 +0000000000FFFFFFF80000000000003FF80000000000000000FFFFFFF80000000000007F +F00000000000000000FFFFFFFC0000000000007FF000000000000000007FFFFFFC000000 +0000007FE000000000000000007FFFFFFC000000000000FFE000000000000000003FFFFF +FE000000000000FFE000000000000000003FFFFFFE000000000001FFC000000000000000 
+001FFFFFFF000000000001FFC000000000000000001FFFFFFF000000000001FF80000000 +00000000001FFFFFFF000000000003FF8000000000000000000FFFFFFF800000000003FF +8000000000000000000FFFFFFF800000000007FF00000000000000000007FFFFFFC00000 +000007FF00000000000000000007FFFFFFC00000000007FE00000000000000000007FFFF +FFC0000000001FFF80000000000000000003FFFFFFE000000003FFFFFE00000000000000 +0003FFFFFFE00000FFFFFFFFFFFFC0000000001FFFFFFFFFFFFFFFFEFFFFFFFFFFFFC000 +0000001FFFFFFFFFFFFFFFFEFFFFFFFFFFFFC0000000001FFFFFFFFFFFFFFFFEFFFFFFFF +FFFFC0000000001FFFFFFFFFFFFFFFFEFFFFFFFFFFFFC0000000001FFFFFFFFFFFFFFFFE +FFFFFFFFFFFFC0000000001FFFFFFFFFFFFFFFFEFFFFFFFFFFFFC0000000001FFFFFFFFF +FFFFFFFEFFFFFFFFFFFFC0000000001FFFFFFFFFFFFFFFFEFFFFFFFFFFFFC0000000001F +FFFFFFFFFFFFFFFE>159 145 120 272 176 65 D[143 142 +120 269 165 I[121 142 120 269 140 76 D[<000000000FFFF8000000 +003E0000000001FFFFFFE00000003F000000001FFFFFFFFE0000007F000000007FFFFFFF +FFC00000FF00000003FFFFFFFFFFF00001FF0000000FFFFFFFFFFFFC0003FF0000001FFF +FFFFFFFFFF0007FF0000007FFFFFFFFFFFFFC00FFF000000FFFFFE000FFFFFF01FFF0000 +03FFFFC000007FFFF83FFF000007FFFF00000007FFFC7FFF00000FFFFC00000000FFFFFF +FF00001FFFF0000000003FFFFFFF00003FFFE0000000000FFFFFFF00007FFF8000000000 +07FFFFFF0000FFFF800000000001FFFFFF0000FFFF000000000000FFFFFF0001FFFE0000 +000000007FFFFF0003FFFC0000000000001FFFFF0003FFFC0000000000000FFFFF0007FF +F800000000000007FFFF0007FFF800000000000007FFFF000FFFF000000000000003FFFF +000FFFF000000000000001FFFF001FFFF000000000000000FFFF001FFFF0000000000000 +00FFFF003FFFE0000000000000007FFF003FFFE0000000000000003FFF003FFFE0000000 +000000003FFF007FFFE0000000000000001FFF007FFFE0000000000000001FFF007FFFE0 +000000000000001FFF007FFFE0000000000000000FFF007FFFE0000000000000000FFF00 +FFFFE00000000000000007FF00FFFFF00000000000000007FF00FFFFF000000000000000 +07FF00FFFFF00000000000000007FF00FFFFF80000000000000003FF00FFFFF800000000 +00000003FF00FFFFFC0000000000000003FF00FFFFFC0000000000000003FF00FFFFFE00 
+00000000000001FF00FFFFFF0000000000000001FF00FFFFFF0000000000000001FF00FF +FFFF8000000000000001FF00FFFFFFC000000000000001FF00FFFFFFF000000000000000 +FE007FFFFFF80000000000000000007FFFFFFE0000000000000000007FFFFFFF00000000 +00000000007FFFFFFFE000000000000000003FFFFFFFFE00000000000000003FFFFFFFFF +E0000000000000003FFFFFFFFFFF000000000000001FFFFFFFFFFFF00000000000001FFF +FFFFFFFFFF8000000000000FFFFFFFFFFFFFF800000000000FFFFFFFFFFFFFFFC0000000 +0007FFFFFFFFFFFFFFFC0000000007FFFFFFFFFFFFFFFF8000000003FFFFFFFFFFFFFFFF +E000000003FFFFFFFFFFFFFFFFF800000001FFFFFFFFFFFFFFFFFE00000000FFFFFFFFFF +FFFFFFFF800000007FFFFFFFFFFFFFFFFFE00000007FFFFFFFFFFFFFFFFFF00000003FFF +FFFFFFFFFFFFFFF80000001FFFFFFFFFFFFFFFFFFC0000000FFFFFFFFFFFFFFFFFFF0000 +0007FFFFFFFFFFFFFFFFFF80000003FFFFFFFFFFFFFFFFFFC0000000FFFFFFFFFFFFFFFF +FFE00000007FFFFFFFFFFFFFFFFFE00000003FFFFFFFFFFFFFFFFFF00000000FFFFFFFFF +FFFFFFFFF800000003FFFFFFFFFFFFFFFFFC00000000FFFFFFFFFFFFFFFFFE000000003F +FFFFFFFFFFFFFFFE000000000FFFFFFFFFFFFFFFFF0000000001FFFFFFFFFFFFFFFF0000 +0000001FFFFFFFFFFFFFFF800000000001FFFFFFFFFFFFFF8000000000000FFFFFFFFFFF +FFC00000000000007FFFFFFFFFFFC000000000000007FFFFFFFFFFE0000000000000003F +FFFFFFFFE00000000000000003FFFFFFFFF000000000000000003FFFFFFFF00000000000 +0000000FFFFFFFF0000000000000000003FFFFFFF8000000000000000000FFFFFFF80000 +000000000000007FFFFFF80000000000000000001FFFFFF80000000000000000000FFFFF +F800000000000000000007FFFFFC00000000000000000007FFFFFC000000000000000000 +03FFFFFC7F000000000000000001FFFFFCFF800000000000000000FFFFFCFF8000000000 +00000000FFFFFCFF8000000000000000007FFFFCFF8000000000000000007FFFFCFF8000 +000000000000007FFFFCFF8000000000000000003FFFFCFF8000000000000000003FFFFC +FF8000000000000000003FFFFCFFC000000000000000001FFFFCFFC00000000000000000 +1FFFFCFFC000000000000000001FFFFCFFC000000000000000001FFFF8FFE00000000000 +0000001FFFF8FFE000000000000000001FFFF8FFE000000000000000001FFFF8FFF00000 +0000000000001FFFF8FFF000000000000000001FFFF0FFF000000000000000001FFFF0FF 
+F800000000000000003FFFF0FFFC00000000000000003FFFE0FFFC00000000000000003F +FFE0FFFE00000000000000003FFFE0FFFF00000000000000007FFFC0FFFF000000000000 +00007FFFC0FFFF80000000000000007FFF80FFFFC000000000000000FFFF80FFFFE00000 +0000000000FFFF00FFFFF800000000000001FFFF00FFFFFC00000000000003FFFE00FFFF +FE00000000000003FFFC00FFFFFF80000000000007FFFC00FFFFFFE000000000000FFFF8 +00FFFFFFF800000000001FFFF000FFFFFFFE00000000007FFFE000FFFFFFFFC000000000 +FFFFC000FFFFFFFFF800000003FFFF8000FFFE7FFFFF8000001FFFFF0000FFFC1FFFFFFF +0001FFFFFE0000FFF807FFFFFFFFFFFFFFFC0000FFF003FFFFFFFFFFFFFFF00000FFE000 +FFFFFFFFFFFFFFE00000FFC0001FFFFFFFFFFFFF800000FF800007FFFFFFFFFFFE000000 +FF000000FFFFFFFFFFF8000000FE0000001FFFFFFFFFC0000000FC00000000FFFFFFFC00 +0000007C0000000001FFFF8000000000>102 146 115 271 129 +83 D<00000000FFFFFC000000000000000000007FFFFFFFF0000000000000000007FFFF +FFFFFF00000000000000003FFFFFFFFFFFE000000000000000FFFFFFFFFFFFF800000000 +000001FFFFFFFFFFFFFE00000000000007FFFFFFFFFFFFFF8000000000000FFFFFC0007F +FFFFE000000000001FFFE000000FFFFFF000000000003FFFF0000003FFFFF80000000000 +7FFFF8000000FFFFFE00000000007FFFFC0000007FFFFF0000000000FFFFFC0000003FFF +FF8000000000FFFFFE0000001FFFFFC000000000FFFFFE0000000FFFFFC000000001FFFF +FF00000007FFFFE000000001FFFFFF00000003FFFFF000000001FFFFFF00000003FFFFF0 +00000001FFFFFF00000001FFFFF800000001FFFFFF00000001FFFFF800000001FFFFFF00 +000001FFFFFC00000001FFFFFF00000000FFFFFC00000001FFFFFF00000000FFFFFE0000 +0000FFFFFE00000000FFFFFE00000000FFFFFE00000000FFFFFE000000007FFFFC000000 +007FFFFE000000007FFFFC000000007FFFFF000000003FFFF8000000007FFFFF00000000 +1FFFF0000000007FFFFF0000000007FFC0000000007FFFFF0000000001FF00000000007F +FFFF00000000000000000000007FFFFF00000000000000000000007FFFFF000000000000 +00000000007FFFFF00000000000000000000007FFFFF00000000000000000000007FFFFF +00000000000000000000007FFFFF00000000000000000000007FFFFF0000000000000000 +0000007FFFFF00000000000000000000007FFFFF00000000000000000003FFFFFFFF0000 
+0000000000000FFFFFFFFFFF0000000000000003FFFFFFFFFFFF000000000000007FFFFF +FFFFFFFF00000000000007FFFFFFFFFFFFFF0000000000003FFFFFFFFFFFFFFF00000000 +0001FFFFFFFF807FFFFF00000000000FFFFFFFE0007FFFFF00000000003FFFFFFC00007F +FFFF0000000000FFFFFFE000007FFFFF0000000003FFFFFF0000007FFFFF0000000007FF +FFFC0000007FFFFF000000001FFFFFF00000007FFFFF000000003FFFFFC00000007FFFFF +000000007FFFFF800000007FFFFF00000000FFFFFE000000007FFFFF00000001FFFFFC00 +0000007FFFFF00000003FFFFF8000000007FFFFF00000007FFFFF0000000007FFFFF0000 +000FFFFFE0000000007FFFFF0000001FFFFFE0000000007FFFFF0000001FFFFFC0000000 +007FFFFF0000003FFFFF80000000007FFFFF0000003FFFFF80000000007FFFFF0000007F +FFFF00000000007FFFFF0000007FFFFF00000000007FFFFF0000007FFFFF00000000007F +FFFF000000FFFFFF00000000007FFFFF000000FFFFFE00000000007FFFFF000000FFFFFE +00000000007FFFFF000000FFFFFE00000000007FFFFF000000FFFFFE0000000000FFFFFF +000000FFFFFE0000000000FFFFFF000000FFFFFE0000000000FFFFFF000000FFFFFE0000 +000000FFFFFF000000FFFFFE0000000001FFFFFF000000FFFFFF0000000001FFFFFF0000 +007FFFFF0000000003FFFFFF0000007FFFFF0000000007EFFFFF0000003FFFFF80000000 +0FEFFFFF0000003FFFFFC00000000FCFFFFF0000001FFFFFC00000003FCFFFFF0000001F +FFFFE00000007F8FFFFF8000000FFFFFF0000000FF0FFFFFE0000007FFFFFC000003FE0F +FFFFFE000003FFFFFE00000FFC07FFFFFFFF0001FFFFFF80003FF807FFFFFFFF8000FFFF +FFF803FFF003FFFFFFFF80003FFFFFFFFFFFE001FFFFFFFF80000FFFFFFFFFFF8000FFFF +FFFF800003FFFFFFFFFF00007FFFFFFF800000FFFFFFFFFC00001FFFFFFF8000001FFFFF +FFF0000007FFFFFF80000001FFFFFF800000007FFFFF0000000007FFF800000000000000 +00695F79DD71>97 D[<0000001FF000000000000000000000007FFFFFF0000000000000 +00000000FFFFFFFFF000000000000000000000FFFFFFFFF000000000000000000000FFFF +FFFFF000000000000000000000FFFFFFFFF000000000000000000000FFFFFFFFF0000000 +00000000000000FFFFFFFFF000000000000000000000FFFFFFFFF0000000000000000000 +00FFFFFFFFF000000000000000000000FFFFFFFFF000000000000000000000003FFFFFF0 +000000000000000000000007FFFFF0000000000000000000000007FFFFF0000000000000 
+000000000003FFFFF0000000000000000000000003FFFFF0000000000000000000000003 +FFFFF0000000000000000000000003FFFFF0000000000000000000000003FFFFF0000000 +000000000000000003FFFFF0000000000000000000000003FFFFF0000000000000000000 +000003FFFFF0000000000000000000000003FFFFF0000000000000000000000003FFFFF0 +000000000000000000000003FFFFF0000000000000000000000003FFFFF0000000000000 +000000000003FFFFF0000000000000000000000003FFFFF0000000000000000000000003 +FFFFF0000000000000000000000003FFFFF0000000000000000000000003FFFFF0000000 +000000000000000003FFFFF0000000000000000000000003FFFFF0000000000000000000 +000003FFFFF0000000000000000000000003FFFFF0000000000000000000000003FFFFF0 +000000000000000000000003FFFFF0000000000000000000000003FFFFF0000000000000 +000000000003FFFFF0000000000000000000000003FFFFF0000000000000000000000003 +FFFFF0000000000000000000000003FFFFF0000000000000000000000003FFFFF0000000 +000000000000000003FFFFF0000000000000000000000003FFFFF0000000000000000000 +000003FFFFF0000000000000000000000003FFFFF0000000000000000000000003FFFFF0 +000000000000000000000003FFFFF0000000000000000000000003FFFFF0000000000000 +000000000003FFFFF000003FFFC000000000000003FFFFF00007FFFFFE00000000000003 +FFFFF0003FFFFFFFE0000000000003FFFFF001FFFFFFFFF8000000000003FFFFF007FFFF +FFFFFF000000000003FFFFF01FFFFFFFFFFFC00000000003FFFFF07FFFFFFFFFFFF00000 +000003FFFFF0FFFFC00FFFFFF80000000003FFFFF3FFFC0000FFFFFE0000000003FFFFF7 +FFE000003FFFFF0000000003FFFFFFFF8000000FFFFF8000000003FFFFFFFE00000007FF +FFE000000003FFFFFFF800000001FFFFF000000003FFFFFFF000000000FFFFF800000003 +FFFFFFE0000000007FFFFC00000003FFFFFF80000000003FFFFC00000003FFFFFF000000 +00003FFFFE00000003FFFFFE00000000001FFFFF00000003FFFFFC00000000000FFFFF80 +000003FFFFFC00000000000FFFFF80000003FFFFF8000000000007FFFFC0000003FFFFF8 +000000000007FFFFE0000003FFFFF8000000000007FFFFE0000003FFFFF8000000000003 +FFFFF0000003FFFFF8000000000003FFFFF0000003FFFFF8000000000003FFFFF8000003 +FFFFF8000000000001FFFFF8000003FFFFF8000000000001FFFFF8000003FFFFF8000000 
+000001FFFFFC000003FFFFF8000000000001FFFFFC000003FFFFF8000000000001FFFFFE +000003FFFFF8000000000001FFFFFE000003FFFFF8000000000001FFFFFE000003FFFFF8 +000000000000FFFFFE000003FFFFF8000000000000FFFFFF000003FFFFF8000000000000 +FFFFFF000003FFFFF8000000000000FFFFFF000003FFFFF8000000000000FFFFFF000003 +FFFFF8000000000000FFFFFF000003FFFFF8000000000000FFFFFF800003FFFFF8000000 +000000FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF8000000000000FFFFFF +800003FFFFF8000000000000FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF8 +000000000000FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF8000000000000 +FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF8000000000000FFFFFF800003 +FFFFF8000000000000FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF8000000 +000000FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF8000000000000FFFFFF +800003FFFFF8000000000000FFFFFF000003FFFFF8000000000000FFFFFF000003FFFFF8 +000000000000FFFFFF000003FFFFF8000000000000FFFFFF000003FFFFF8000000000000 +FFFFFF000003FFFFF8000000000000FFFFFE000003FFFFF8000000000001FFFFFE000003 +FFFFF8000000000001FFFFFE000003FFFFF8000000000001FFFFFC000003FFFFF8000000 +000001FFFFFC000003FFFFF8000000000001FFFFFC000003FFFFF8000000000001FFFFF8 +000003FFFFF8000000000003FFFFF8000003FFFFF8000000000003FFFFF0000003FFFFF8 +000000000003FFFFF0000003FFFFF8000000000007FFFFE0000003FFFFF8000000000007 +FFFFE0000003FFFFF8000000000007FFFFC0000003FFFFF800000000000FFFFFC0000003 +FFFFFC00000000000FFFFF80000003FFFFFE00000000001FFFFF00000003FFFFFF000000 +00003FFFFE00000003FFFFFF00000000003FFFFE00000003FFFFFF80000000007FFFFC00 +000003FFFFFFC000000000FFFFF800000003FFFFFFF000000001FFFFF000000003FFFFBF +F800000003FFFFE000000003FFFF1FFC0000000FFFFFC000000003FFFE0FFF0000001FFF +FF0000000003FFFE07FFC000007FFFFE0000000003FFFC03FFF00003FFFFFC0000000003 +FFF800FFFF003FFFFFF00000000003FFF0007FFFFFFFFFFFC00000000003FFE0001FFFFF +FFFFFF000000000003FFC0000FFFFFFFFFFC000000000003FF800003FFFFFFFFF0000000 +000003FF0000007FFFFFFF80000000000003FE0000000FFFFFF800000000000000000000 
+0000FFFE000000000000>113 144 121 270 129 I<00000000007FFFF0000000000000 +001FFFFFFFE0000000000000FFFFFFFFFE000000000007FFFFFFFFFF80000000001FFFFF +FFFFFFF000000000FFFFFFFFFFFFF800000001FFFFFFFFFFFFFE00000007FFFFFFFFFFFF +FF0000001FFFFFF8000FFFFF8000003FFFFF800000FFFFC000007FFFFE000001FFFFE000 +01FFFFF8000003FFFFE00003FFFFF0000003FFFFF00007FFFFE0000007FFFFF0000FFFFF +C0000007FFFFF0001FFFFF8000000FFFFFF8003FFFFF0000000FFFFFF8003FFFFE000000 +0FFFFFF8007FFFFC0000000FFFFFF800FFFFFC0000000FFFFFF800FFFFF80000000FFFFF +F801FFFFF80000000FFFFFF803FFFFF00000000FFFFFF803FFFFF000000007FFFFF007FF +FFF000000007FFFFF007FFFFE000000003FFFFE00FFFFFE000000003FFFFE00FFFFFE000 +000001FFFFC01FFFFFE000000000FFFF801FFFFFC0000000003FFE001FFFFFC000000000 +0FF8003FFFFFC0000000000000003FFFFFC0000000000000003FFFFFC000000000000000 +3FFFFFC0000000000000007FFFFF80000000000000007FFFFF80000000000000007FFFFF +80000000000000007FFFFF80000000000000007FFFFF8000000000000000FFFFFF800000 +0000000000FFFFFF8000000000000000FFFFFF8000000000000000FFFFFF800000000000 +0000FFFFFF8000000000000000FFFFFF8000000000000000FFFFFF8000000000000000FF +FFFF8000000000000000FFFFFF8000000000000000FFFFFF8000000000000000FFFFFF80 +00000000000000FFFFFF8000000000000000FFFFFF8000000000000000FFFFFF80000000 +00000000FFFFFF8000000000000000FFFFFF80000000000000007FFFFF80000000000000 +007FFFFF80000000000000007FFFFFC0000000000000007FFFFFC0000000000000007FFF +FFC0000000000000003FFFFFC0000000000000003FFFFFC0000000000000003FFFFFC000 +0000000000001FFFFFE0000000000000001FFFFFE0000000000000001FFFFFE000000000 +0000000FFFFFE0000000000000000FFFFFF0000000000000FE07FFFFF0000000000001FF +07FFFFF0000000000001FF03FFFFF8000000000003FF03FFFFF8000000000003FE01FFFF +FC000000000003FE01FFFFFC000000000007FE00FFFFFE00000000000FFC007FFFFF0000 +0000000FFC007FFFFF80000000001FF8003FFFFF80000000003FF0001FFFFFC000000000 +7FF0000FFFFFE000000000FFE00007FFFFF800000001FFC00003FFFFFC00000003FFC000 +01FFFFFF0000000FFF800000FFFFFFC000003FFF0000003FFFFFF00001FFFC0000001FFF 
+FFFF801FFFF800000007FFFFFFFFFFFFF000000003FFFFFFFFFFFFC000000000FFFFFFFF +FFFF80000000003FFFFFFFFFFE000000000007FFFFFFFFF8000000000000FFFFFFFFC000 +00000000001FFFFFFE00000000000000007FFFC0000000585F78DD67>I<00000000007F +FFC000000000000000000FFFFFFF0000000000000000FFFFFFFFE000000000000007FFFF +FFFFFC0000000000001FFFFFFFFFFF0000000000007FFFFFFFFFFFC00000000001FFFFFF +FFFFFFF00000000007FFFFF803FFFFF8000000001FFFFF80003FFFFE000000003FFFFE00 +000FFFFF000000007FFFF8000003FFFF80000001FFFFF0000001FFFFC0000003FFFFC000 +00007FFFE0000007FFFF800000003FFFF000000FFFFF000000003FFFF000001FFFFF0000 +00001FFFF800003FFFFE000000000FFFFC00003FFFFC000000000FFFFE00007FFFFC0000 +000007FFFE0000FFFFF80000000003FFFF0001FFFFF80000000003FFFF0001FFFFF00000 +000003FFFF8003FFFFF00000000001FFFF8003FFFFE00000000001FFFF8007FFFFE00000 +000001FFFFC007FFFFE00000000000FFFFC00FFFFFE00000000000FFFFE00FFFFFC00000 +000000FFFFE01FFFFFC00000000000FFFFE01FFFFFC000000000007FFFE01FFFFFC00000 +0000007FFFF03FFFFFC000000000007FFFF03FFFFFC000000000007FFFF03FFFFF800000 +0000007FFFF07FFFFF8000000000007FFFF07FFFFF8000000000007FFFF07FFFFF800000 +0000007FFFF87FFFFF8000000000003FFFF87FFFFF8000000000003FFFF8FFFFFF800000 +0000003FFFF8FFFFFF8000000000003FFFF8FFFFFF8000000000003FFFF8FFFFFFFFFFFF +FFFFFFFFFFF8FFFFFFFFFFFFFFFFFFFFFFF8FFFFFFFFFFFFFFFFFFFFFFF8FFFFFFFFFFFF +FFFFFFFFFFF8FFFFFFFFFFFFFFFFFFFFFFF8FFFFFFFFFFFFFFFFFFFFFFF0FFFFFF800000 +000000000000FFFFFF800000000000000000FFFFFF800000000000000000FFFFFF800000 +000000000000FFFFFF800000000000000000FFFFFF800000000000000000FFFFFF800000 +0000000000007FFFFF8000000000000000007FFFFF8000000000000000007FFFFF800000 +0000000000007FFFFF8000000000000000007FFFFFC000000000000000003FFFFFC00000 +0000000000003FFFFFC000000000000000003FFFFFC000000000000000001FFFFFC00000 +0000000000001FFFFFC000000000000000001FFFFFE000000000000000000FFFFFE00000 +0000000000000FFFFFE0000000000000000007FFFFE000000000000007F007FFFFF00000 +000000000FF803FFFFF00000000000000FF803FFFFF80000000000000FF801FFFFF80000 
+000000001FF801FFFFFC0000000000001FF000FFFFFC0000000000003FF0007FFFFE0000 +000000007FE0003FFFFF0000000000007FE0003FFFFF000000000000FFC0001FFFFF8000 +00000001FF80000FFFFFC00000000003FF800007FFFFF00000000007FF000003FFFFF800 +0000001FFE000001FFFFFC000000003FFC0000007FFFFF00000000FFF80000003FFFFFE0 +000003FFF00000001FFFFFFC00001FFFE000000007FFFFFFC003FFFFC000000003FFFFFF +FFFFFFFF0000000000FFFFFFFFFFFFFE00000000003FFFFFFFFFFFF800000000000FFFFF +FFFFFFE0000000000001FFFFFFFFFF800000000000003FFFFFFFFC0000000000000003FF +FFFFC000000000000000000FFFF8000000005D5F7ADD6A>101 D<000000000000000000 +0000FFC000000000000FFFFC0000000FFFF00000000001FFFFFFE000007FFFF800000000 +1FFFFFFFFE0001FFFFFE000000007FFFFFFFFF8007FFFFFF00000003FFFFFFFFFFF01FFF +FFFF0000000FFFFFFFFFFFFC3FFFFFFF8000001FFFFFFFFFFFFE7FFF3FFF8000007FFFFF +003FFFFFFFF03FFFC00000FFFFF80007FFFFFF807FFFC00001FFFFE00001FFFFFC007FFF +C00007FFFF8000007FFFF8007FFFC00007FFFF0000003FFFF8007FFFC0000FFFFE000000 +1FFFFC007FFFC0001FFFFC0000000FFFFE007FFFC0003FFFF800000007FFFF003FFF8000 +7FFFF800000007FFFF803FFF80007FFFF800000007FFFF801FFF0000FFFFF000000003FF +FFC00FFE0000FFFFF000000003FFFFC003F80001FFFFF000000003FFFFE000000001FFFF +F000000003FFFFE000000003FFFFE000000001FFFFF000000003FFFFE000000001FFFFF0 +00000003FFFFE000000001FFFFF000000003FFFFE000000001FFFFF000000007FFFFE000 +000001FFFFF800000007FFFFE000000001FFFFF800000007FFFFE000000001FFFFF80000 +0007FFFFE000000001FFFFF800000007FFFFE000000001FFFFF800000007FFFFE0000000 +01FFFFF800000007FFFFE000000001FFFFF800000007FFFFE000000001FFFFF800000007 +FFFFE000000001FFFFF800000007FFFFE000000001FFFFF800000007FFFFE000000001FF +FFF800000003FFFFE000000001FFFFF000000003FFFFE000000001FFFFF000000003FFFF +E000000001FFFFF000000003FFFFE000000001FFFFF000000001FFFFF000000003FFFFE0 +00000001FFFFF000000003FFFFE000000000FFFFF000000003FFFFC000000000FFFFF000 +000003FFFFC0000000007FFFF800000007FFFF80000000007FFFF800000007FFFF800000 +00003FFFF800000007FFFF00000000001FFFFC0000000FFFFE00000000000FFFFE000000 
+1FFFFC000000000007FFFF0000003FFFF8000000000007FFFF8000007FFFF80000000000 +01FFFFE00001FFFFE0000000000000FFFFF80007FFFFC0000000000001FFFFFF003FFFFF +80000000000003FFFFFFFFFFFFFE00000000000007FFFFFFFFFFFFFC00000000000007F3 +FFFFFFFFFFF00000000000000FE07FFFFFFFFF800000000000000FE01FFFFFFFFE000000 +000000001FC001FFFFFFE0000000000000001FC0000FFFFC00000000000000003FC00000 +000000000000000000003FC00000000000000000000000003FC000000000000000000000 +00007FC00000000000000000000000007FC00000000000000000000000007FE000000000 +00000000000000007FE00000000000000000000000007FE0000000000000000000000000 +7FF00000000000000000000000007FF00000000000000000000000007FF8000000000000 +0000000000007FFC0000000000000000000000007FFE0000000000000000000000007FFF +8000000000000000000000007FFFF000000000000000000000007FFFFFFFFFFFFFF00000 +000000003FFFFFFFFFFFFFFFE000000000003FFFFFFFFFFFFFFFFE00000000003FFFFFFF +FFFFFFFFFFC0000000001FFFFFFFFFFFFFFFFFF8000000001FFFFFFFFFFFFFFFFFFE0000 +00001FFFFFFFFFFFFFFFFFFF800000000FFFFFFFFFFFFFFFFFFFE000000007FFFFFFFFFF +FFFFFFFFF000000007FFFFFFFFFFFFFFFFFFF800000003FFFFFFFFFFFFFFFFFFFE000000 +01FFFFFFFFFFFFFFFFFFFF00000000FFFFFFFFFFFFFFFFFFFF000000007FFFFFFFFFFFFF +FFFFFF800000007FFFFFFFFFFFFFFFFFFFC0000003FFFFFFFFFFFFFFFFFFFFE000000FFF +FFFFFFFFFFFFFFFFFFE000003FFFFFFFFFFFFFFFFFFFFFF00000FFFFFFFFFFFFFFFFFFFF +FFF00001FFFFE0000000003FFFFFFFF80003FFFF000000000000FFFFFFF80007FFFC0000 +000000000FFFFFF8000FFFF800000000000001FFFFFC001FFFF0000000000000007FFFFC +003FFFE0000000000000003FFFFC003FFFE0000000000000001FFFFC007FFFC000000000 +0000000FFFFE007FFFC00000000000000007FFFE007FFFC00000000000000007FFFE00FF +FF800000000000000007FFFE00FFFF800000000000000003FFFE00FFFF80000000000000 +0003FFFE00FFFF800000000000000003FFFE00FFFF800000000000000003FFFE00FFFF80 +0000000000000003FFFE00FFFF800000000000000003FFFE00FFFFC00000000000000007 +FFFE007FFFC00000000000000007FFFC007FFFC00000000000000007FFFC007FFFE00000 +00000000000FFFFC003FFFF0000000000000001FFFF8001FFFF0000000000000001FFFF0 
+001FFFF8000000000000003FFFF0000FFFFC000000000000007FFFE00007FFFE00000000 +000000FFFFC00003FFFF80000000000003FFFF800003FFFFC0000000000007FFFF800000 +FFFFF000000000001FFFFE0000007FFFFC00000000007FFFFC0000003FFFFF8000000003 +FFFFF80000000FFFFFF80000003FFFFFE000000007FFFFFFE0000FFFFFFFC000000001FF +FFFFFFFFFFFFFFFF00000000003FFFFFFFFFFFFFFFF800000000000FFFFFFFFFFFFFFFE0 +000000000001FFFFFFFFFFFFFF000000000000001FFFFFFFFFFFF00000000000000001FF +FFFFFFFF00000000000000000001FFFFFF0000000000006A887ADD74>103 +D[<00003FC00000000000FFF00000000003FFFC0000000007FFFE000000000FFFFF0000 +00001FFFFF800000003FFFFFC00000007FFFFFE00000007FFFFFE0000000FFFFFFF00000 +00FFFFFFF0000001FFFFFFF8000001FFFFFFF8000001FFFFFFF8000001FFFFFFF8000001 +FFFFFFF8000001FFFFFFF8000001FFFFFFF8000001FFFFFFF8000000FFFFFFF0000000FF +FFFFF00000007FFFFFE00000007FFFFFE00000003FFFFFC00000001FFFFF800000000FFF +FF0000000007FFFE0000000003FFFC0000000000FFF000000000003FC000000000000000 +000000000000000000000000000000000000000000000000000000000000000000000000 +000000000000000000000000000000000000000000000000000000000000000000000000 +000000000000000000000000000000000000000000000000000000000000000000000000 +000000000000000000000000000000000000000000000000000000000000000000000000 +00001FF00000007FFFFFF00000FFFFFFFFF00000FFFFFFFFF00000FFFFFFFFF00000FFFF +FFFFF00000FFFFFFFFF00000FFFFFFFFF00000FFFFFFFFF00000FFFFFFFFF00000FFFFFF +FFF00000003FFFFFF000000007FFFFF000000007FFFFF000000003FFFFF000000003FFFF +F000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF0 +00000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000 +000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF00000 +0003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF0000000 +03FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003 +FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FF +FFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFF 
+F000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF0 +00000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000 +000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF00000 +0003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF0000000 +03FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003 +FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FF +FFF000000003FFFFF00000FFFFFFFFFFFF80FFFFFFFFFFFF80FFFFFFFFFFFF80FFFFFFFF +FFFF80FFFFFFFFFFFF80FFFFFFFFFFFF80FFFFFFFFFFFF80FFFFFFFFFFFF80FFFFFFFFFF +FF80>49 144 119 271 65 105 D[<0000001FF00000007FFFFFF00000FFFFFFFFF00000 +FFFFFFFFF00000FFFFFFFFF00000FFFFFFFFF00000FFFFFFFFF00000FFFFFFFFF00000FF +FFFFFFF00000FFFFFFFFF00000FFFFFFFFF00000003FFFFFF000000007FFFFF000000007 +FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FF +FFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFF +F000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF0 +00000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000 +000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF00000 +0003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF0000000 +03FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003 +FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FF +FFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFF +F000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF0 +00000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000 +000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF00000 +0003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF0000000 +03FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003 +FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FF +FFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFF 
+F000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF0 +00000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000 +000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF00000 +0003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF0000000 +03FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003 +FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FF +FFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFFF000000003FFFF +F000000003FFFFF000000003FFFFF00000FFFFFFFFFFFFC0FFFFFFFFFFFFC0FFFFFFFFFF +FFC0FFFFFFFFFFFFC0FFFFFFFFFFFFC0FFFFFFFFFFFFC0FFFFFFFFFFFFC0FFFFFFFFFFFF +C0FFFFFFFFFFFFC0>50 143 119 270 65 108 D<0000003FE0000001FFFC0000000000 +01FFFC0000000000007FFFFFE000001FFFFFE0000000001FFFFFE000000000FFFFFFFFE0 +0000FFFFFFFC00000000FFFFFFFC00000000FFFFFFFFE00003FFFFFFFF00000003FFFFFF +FF00000000FFFFFFFFE0000FFFFFFFFFC000000FFFFFFFFFC0000000FFFFFFFFE0003FFF +FFFFFFF000003FFFFFFFFFF0000000FFFFFFFFE000FFFFFFFFFFF80000FFFFFFFFFFF800 +0000FFFFFFFFE001FFFC03FFFFFC0001FFFC03FFFFFC000000FFFFFFFFE003FFC000FFFF +FE0003FFC000FFFFFE000000FFFFFFFFE007FE00003FFFFF0007FE00003FFFFF000000FF +FFFFFFE00FF800001FFFFF000FF800001FFFFF000000003FFFFFE01FF000001FFFFF801F +F000001FFFFF8000000007FFFFE03FC000000FFFFFC03FC000000FFFFFC000000007FFFF +E07F8000000FFFFFC07F8000000FFFFFC000000003FFFFE0FF0000000FFFFFE0FF000000 +0FFFFFE000000003FFFFE1FE00000007FFFFE1FE00000007FFFFE000000003FFFFE1FC00 +000007FFFFE1FC00000007FFFFE000000003FFFFE3F800000007FFFFF3F800000007FFFF +F000000003FFFFE7F000000007FFFFF7F000000007FFFFF000000003FFFFE7E000000003 +FFFFF7E000000003FFFFF000000003FFFFEFC000000003FFFFFFC000000003FFFFF00000 +0003FFFFEFC000000003FFFFFFC000000003FFFFF000000003FFFFFF8000000003FFFFFF +8000000003FFFFF800000003FFFFFF8000000003FFFFFF8000000003FFFFF800000003FF +FFFF0000000003FFFFFF0000000003FFFFF800000003FFFFFF0000000003FFFFFF000000 +0003FFFFF800000003FFFFFE0000000003FFFFFE0000000003FFFFF800000003FFFFFE00 
+00000003FFFFFE0000000003FFFFF800000003FFFFFE0000000003FFFFFE0000000003FF +FFF800000003FFFFFC0000000003FFFFFC0000000003FFFFF800000003FFFFFC00000000 +03FFFFFC0000000003FFFFF800000003FFFFFC0000000003FFFFFC0000000003FFFFF800 +000003FFFFFC0000000003FFFFFC0000000003FFFFF800000003FFFFF80000000003FFFF +F80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF800000003 +FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000 +000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF8 +0000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003 +FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF8000000 +0003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF8 +00000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FF +FFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF8000000 +03FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF800 +00000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFF +F80000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF800000000 +03FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF80000 +000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFF +F800000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF80000000003 +FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF80000 +0003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF8 +0000000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF800000003FF +FFF80000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF8000000 +0003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF800 +00000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003FF +FFF800000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF800000000 +03FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF800 +000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFF 
+F80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF800000003 +FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000 +000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF8 +0000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003 +FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF8000000 +0003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF8 +00000003FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FF +FFF80000000003FFFFF800000003FFFFF80000000003FFFFF80000000003FFFFF8000000 +03FFFFF80000000003FFFFF80000000003FFFFF800000003FFFFF80000000003FFFFF800 +00000003FFFFF80000FFFFFFFFFFFFE000FFFFFFFFFFFFE000FFFFFFFFFFFFE0FFFFFFFF +FFFFE000FFFFFFFFFFFFE000FFFFFFFFFFFFE0FFFFFFFFFFFFE000FFFFFFFFFFFFE000FF +FFFFFFFFFFE0FFFFFFFFFFFFE000FFFFFFFFFFFFE000FFFFFFFFFFFFE0FFFFFFFFFFFFE0 +00FFFFFFFFFFFFE000FFFFFFFFFFFFE0FFFFFFFFFFFFE000FFFFFFFFFFFFE000FFFFFFFF +FFFFE0FFFFFFFFFFFFE000FFFFFFFFFFFFE000FFFFFFFFFFFFE0FFFFFFFFFFFFE000FFFF +FFFFFFFFE000FFFFFFFFFFFFE0FFFFFFFFFFFFE000FFFFFFFFFFFFE000FFFFFFFFFFFFE0 +B35D77DCC2>I<0000003FE0000003FFFC0000000000007FFFFFE000003FFFFFE0000000 +00FFFFFFFFE00000FFFFFFF800000000FFFFFFFFE00007FFFFFFFE00000000FFFFFFFFE0 +001FFFFFFFFF80000000FFFFFFFFE0003FFFFFFFFFE0000000FFFFFFFFE000FFFFFFFFFF +F0000000FFFFFFFFE001FFFC03FFFFF8000000FFFFFFFFE003FFC000FFFFFC000000FFFF +FFFFE007FE00007FFFFE000000FFFFFFFFE00FF800003FFFFE000000003FFFFFE01FE000 +003FFFFF0000000007FFFFE03FC000001FFFFF8000000007FFFFE07F8000001FFFFF8000 +000003FFFFE0FE0000001FFFFFC000000003FFFFE1FC0000000FFFFFC000000003FFFFE1 +F80000000FFFFFC000000003FFFFE3F80000000FFFFFE000000003FFFFE7F00000000FFF +FFE000000003FFFFE7E000000007FFFFE000000003FFFFEFC000000007FFFFE000000003 +FFFFEFC000000007FFFFE000000003FFFFFF8000000007FFFFF000000003FFFFFF800000 +0007FFFFF000000003FFFFFF0000000007FFFFF000000003FFFFFF0000000007FFFFF000 +000003FFFFFE0000000007FFFFF000000003FFFFFE0000000007FFFFF000000003FFFFFE 
+0000000007FFFFF000000003FFFFFC0000000007FFFFF000000003FFFFFC0000000007FF +FFF000000003FFFFFC0000000007FFFFF000000003FFFFFC0000000007FFFFF000000003 +FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8000000 +0007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000 +000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8 +0000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FF +FFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003 +FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8000000 +0007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000 +000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8 +0000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FF +FFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003 +FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8000000 +0007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000 +000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8 +0000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FF +FFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003 +FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8000000 +0007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000 +000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8 +0000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FF +FFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003 +FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8000000 +0007FFFFF00000FFFFFFFFFFFFE001FFFFFFFFFFFFC0FFFFFFFFFFFFE001FFFFFFFFFFFF +C0FFFFFFFFFFFFE001FFFFFFFFFFFFC0FFFFFFFFFFFFE001FFFFFFFFFFFFC0FFFFFFFFFF +FFE001FFFFFFFFFFFFC0FFFFFFFFFFFFE001FFFFFFFFFFFFC0FFFFFFFFFFFFE001FFFFFF +FFFFFFC0FFFFFFFFFFFFE001FFFFFFFFFFFFC0FFFFFFFFFFFFE001FFFFFFFFFFFFC0725D 
+77DC81>I<00000000001FFFF000000000000000000007FFFFFFC000000000000000007F +FFFFFFFC0000000000000003FFFFFFFFFF800000000000000FFFFFFFFFFFE00000000000 +007FFFFFFFFFFFFC000000000001FFFFFFFFFFFFFF000000000003FFFFFC007FFFFF8000 +0000000FFFFFC00007FFFFE0000000001FFFFE000000FFFFF0000000007FFFF80000003F +FFFC00000000FFFFF00000001FFFFE00000001FFFFC000000007FFFF00000003FFFF8000 +000003FFFF80000007FFFF0000000001FFFFC000000FFFFE0000000000FFFFE000001FFF +FC00000000007FFFF000003FFFFC00000000007FFFF800007FFFF800000000003FFFFC00 +007FFFF800000000003FFFFC0000FFFFF000000000001FFFFE0001FFFFF000000000001F +FFFF0001FFFFE000000000000FFFFF0003FFFFE000000000000FFFFF8003FFFFE0000000 +00000FFFFF8007FFFFE000000000000FFFFFC007FFFFC0000000000007FFFFC00FFFFFC0 +000000000007FFFFE00FFFFFC0000000000007FFFFE01FFFFFC0000000000007FFFFF01F +FFFFC0000000000007FFFFF01FFFFFC0000000000007FFFFF03FFFFF80000000000003FF +FFF83FFFFF80000000000003FFFFF83FFFFF80000000000003FFFFF83FFFFF8000000000 +0003FFFFF87FFFFF80000000000003FFFFFC7FFFFF80000000000003FFFFFC7FFFFF8000 +0000000003FFFFFC7FFFFF80000000000003FFFFFC7FFFFF80000000000003FFFFFCFFFF +FF80000000000003FFFFFEFFFFFF80000000000003FFFFFEFFFFFF80000000000003FFFF +FEFFFFFF80000000000003FFFFFEFFFFFF80000000000003FFFFFEFFFFFF800000000000 +03FFFFFEFFFFFF80000000000003FFFFFEFFFFFF80000000000003FFFFFEFFFFFF800000 +00000003FFFFFEFFFFFF80000000000003FFFFFEFFFFFF80000000000003FFFFFEFFFFFF +80000000000003FFFFFEFFFFFF80000000000003FFFFFEFFFFFF80000000000003FFFFFE +FFFFFF80000000000003FFFFFE7FFFFF80000000000003FFFFFC7FFFFF80000000000003 +FFFFFC7FFFFF80000000000003FFFFFC7FFFFF80000000000003FFFFFC7FFFFF80000000 +000003FFFFFC3FFFFFC0000000000007FFFFF83FFFFFC0000000000007FFFFF83FFFFFC0 +000000000007FFFFF81FFFFFC0000000000007FFFFF01FFFFFC0000000000007FFFFF01F +FFFFC0000000000007FFFFF00FFFFFC0000000000007FFFFE00FFFFFE000000000000FFF +FFE007FFFFE000000000000FFFFFC007FFFFE000000000000FFFFFC003FFFFF000000000 +001FFFFF8003FFFFF000000000001FFFFF8001FFFFF000000000001FFFFF0001FFFFF800 
+000000003FFFFF0000FFFFF800000000003FFFFE00007FFFFC00000000007FFFFC00007F +FFFE0000000000FFFFFC00003FFFFE0000000000FFFFF800001FFFFF0000000001FFFFF0 +00000FFFFF8000000003FFFFE0000007FFFFC000000007FFFFC0000003FFFFF00000001F +FFFF80000001FFFFF80000003FFFFF000000007FFFFE000000FFFFFC000000003FFFFFC0 +0007FFFFF8000000001FFFFFFC007FFFFFF00000000007FFFFFFFFFFFFFFC00000000001 +FFFFFFFFFFFFFF0000000000007FFFFFFFFFFFFC0000000000001FFFFFFFFFFFF0000000 +00000003FFFFFFFFFF80000000000000007FFFFFFFFC000000000000000007FFFFFFC000 +00000000000000001FFFF00000000000675F7ADD74>I<0000001FF000001FFFC0000000 +0000007FFFFFF00003FFFFFF0000000000FFFFFFFFF0001FFFFFFFE000000000FFFFFFFF +F000FFFFFFFFFC00000000FFFFFFFFF003FFFFFFFFFF00000000FFFFFFFFF00FFFFFFFFF +FFC0000000FFFFFFFFF03FFFFFFFFFFFF0000000FFFFFFFFF0FFFFFFFFFFFFFC000000FF +FFFFFFF1FFFFC01FFFFFFE000000FFFFFFFFF7FFFC0001FFFFFF000000FFFFFFFFFFFFE0 +00007FFFFFC00000003FFFFFFFFF0000003FFFFFE000000007FFFFFFFE0000000FFFFFF0 +00000007FFFFFFF800000007FFFFF800000003FFFFFFE000000003FFFFFC00000003FFFF +FFC000000001FFFFFE00000003FFFFFF8000000000FFFFFE00000003FFFFFF0000000000 +7FFFFF00000003FFFFFE00000000007FFFFF80000003FFFFFC00000000003FFFFFC00000 +03FFFFF800000000003FFFFFC0000003FFFFF800000000001FFFFFE0000003FFFFF80000 +0000001FFFFFE0000003FFFFF800000000000FFFFFF0000003FFFFF800000000000FFFFF +F0000003FFFFF8000000000007FFFFF8000003FFFFF8000000000007FFFFF8000003FFFF +F8000000000007FFFFFC000003FFFFF8000000000003FFFFFC000003FFFFF80000000000 +03FFFFFC000003FFFFF8000000000003FFFFFE000003FFFFF8000000000003FFFFFE0000 +03FFFFF8000000000001FFFFFE000003FFFFF8000000000001FFFFFE000003FFFFF80000 +00000001FFFFFF000003FFFFF8000000000001FFFFFF000003FFFFF8000000000001FFFF +FF000003FFFFF8000000000001FFFFFF000003FFFFF8000000000000FFFFFF000003FFFF +F8000000000000FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF80000000000 +00FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF8000000000000FFFFFF8000 +03FFFFF8000000000000FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF80000 
+00000000FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF8000000000000FFFF +FF800003FFFFF8000000000000FFFFFF800003FFFFF8000000000000FFFFFF800003FFFF +F8000000000000FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF80000000000 +00FFFFFF800003FFFFF8000000000000FFFFFF800003FFFFF8000000000000FFFFFF0000 +03FFFFF8000000000001FFFFFF000003FFFFF8000000000001FFFFFF000003FFFFF80000 +00000001FFFFFF000003FFFFF8000000000001FFFFFF000003FFFFF8000000000001FFFF +FE000003FFFFF8000000000001FFFFFE000003FFFFF8000000000003FFFFFE000003FFFF +F8000000000003FFFFFC000003FFFFF8000000000003FFFFFC000003FFFFF80000000000 +03FFFFFC000003FFFFF8000000000007FFFFF8000003FFFFF8000000000007FFFFF80000 +03FFFFF8000000000007FFFFF0000003FFFFF800000000000FFFFFF0000003FFFFF80000 +0000000FFFFFE0000003FFFFF800000000001FFFFFE0000003FFFFF800000000001FFFFF +C0000003FFFFF800000000003FFFFFC0000003FFFFFC00000000003FFFFF80000003FFFF +FE00000000007FFFFF00000003FFFFFF0000000000FFFFFE00000003FFFFFF0000000000 +FFFFFE00000003FFFFFF8000000001FFFFFC00000003FFFFFFC000000003FFFFF8000000 +03FFFFFFE000000007FFFFF000000003FFFFFFF80000000FFFFFE000000003FFFFFFFC00 +00003FFFFFC000000003FFFFFFFF0000007FFFFF0000000003FFFFFFFFC00001FFFFFE00 +00000003FFFFFBFFF00007FFFFFC0000000003FFFFF8FFFF007FFFFFF00000000003FFFF +F87FFFFFFFFFFFC00000000003FFFFF81FFFFFFFFFFF000000000003FFFFF80FFFFFFFFF +FC000000000003FFFFF803FFFFFFFFF0000000000003FFFFF8007FFFFFFF800000000000 +03FFFFF8000FFFFFF800000000000003FFFFF80000FFFE0000000000000003FFFFF80000 +00000000000000000003FFFFF8000000000000000000000003FFFFF80000000000000000 +00000003FFFFF8000000000000000000000003FFFFF8000000000000000000000003FFFF +F8000000000000000000000003FFFFF8000000000000000000000003FFFFF80000000000 +00000000000003FFFFF8000000000000000000000003FFFFF80000000000000000000000 +03FFFFF8000000000000000000000003FFFFF8000000000000000000000003FFFFF80000 +00000000000000000003FFFFF8000000000000000000000003FFFFF80000000000000000 +00000003FFFFF8000000000000000000000003FFFFF8000000000000000000000003FFFF 
+F8000000000000000000000003FFFFF8000000000000000000000003FFFFF80000000000 +00000000000003FFFFF8000000000000000000000003FFFFF80000000000000000000000 +03FFFFF8000000000000000000000003FFFFF8000000000000000000000003FFFFF80000 +00000000000000000003FFFFF8000000000000000000000003FFFFF80000000000000000 +00000003FFFFF8000000000000000000000003FFFFF8000000000000000000000003FFFF +F800000000000000000000FFFFFFFFFFFFE00000000000000000FFFFFFFFFFFFE0000000 +0000000000FFFFFFFFFFFFE00000000000000000FFFFFFFFFFFFE00000000000000000FF +FFFFFFFFFFE00000000000000000FFFFFFFFFFFFE00000000000000000FFFFFFFFFFFFE0 +0000000000000000FFFFFFFFFFFFE00000000000000000FFFFFFFFFFFFE0000000000000 +0000718579DC81>I<0000007FC00001FF80000000FFFFFFC0001FFFF80000FFFFFFFFC0 +007FFFFE0000FFFFFFFFC001FFFFFF8000FFFFFFFFC003FFFFFFE000FFFFFFFFC007FFFF +FFF000FFFFFFFFC00FFFFFFFF800FFFFFFFFC01FFF07FFFC00FFFFFFFFC03FF00FFFFE00 +FFFFFFFFC07FC01FFFFE00FFFFFFFFC0FF803FFFFF00003FFFFFC0FF003FFFFF000007FF +FFC1FE007FFFFF800007FFFFC3FC007FFFFF800003FFFFC3F8007FFFFF800003FFFFC7F0 +007FFFFF800003FFFFC7E0007FFFFF800003FFFFCFE0007FFFFF800003FFFFCFC0007FFF +FF800003FFFFDFC0007FFFFF800003FFFFDF80003FFFFF000003FFFFDF80003FFFFF0000 +03FFFFFF00001FFFFE000003FFFFFF00001FFFFE000003FFFFFF00000FFFFC000003FFFF +FE000007FFF8000003FFFFFE000003FFF0000003FFFFFE000000FFC0000003FFFFFC0000 +000000000003FFFFFC0000000000000003FFFFFC0000000000000003FFFFFC0000000000 +000003FFFFF80000000000000003FFFFF80000000000000003FFFFF80000000000000003 +FFFFF80000000000000003FFFFF80000000000000003FFFFF80000000000000003FFFFF0 +0000000000000003FFFFF00000000000000003FFFFF00000000000000003FFFFF0000000 +0000000003FFFFF00000000000000003FFFFF00000000000000003FFFFF0000000000000 +0003FFFFF00000000000000003FFFFF00000000000000003FFFFF00000000000000003FF +FFF00000000000000003FFFFF00000000000000003FFFFF00000000000000003FFFFF000 +00000000000003FFFFF00000000000000003FFFFF00000000000000003FFFFF000000000 +00000003FFFFF00000000000000003FFFFF00000000000000003FFFFF000000000000000 
+03FFFFF00000000000000003FFFFF00000000000000003FFFFF00000000000000003FFFF +F00000000000000003FFFFF00000000000000003FFFFF00000000000000003FFFFF00000 +000000000003FFFFF00000000000000003FFFFF00000000000000003FFFFF00000000000 +000003FFFFF00000000000000003FFFFF00000000000000003FFFFF00000000000000003 +FFFFF00000000000000003FFFFF00000000000000003FFFFF00000000000000003FFFFF0 +0000000000000003FFFFF00000000000000003FFFFF00000000000000003FFFFF0000000 +0000000003FFFFF00000000000000003FFFFF00000000000000003FFFFF0000000000000 +0003FFFFF00000000000000003FFFFF00000000000000003FFFFF0000000000000FFFFFF +FFFFFFFC00000000FFFFFFFFFFFFFC00000000FFFFFFFFFFFFFC00000000FFFFFFFFFFFF +FC00000000FFFFFFFFFFFFFC00000000FFFFFFFFFFFFFC00000000FFFFFFFFFFFFFC0000 +0000FFFFFFFFFFFFFC00000000FFFFFFFFFFFFFC00000000515D79DC5F>114 +D<0000001FFFF80000F800000003FFFFFFC001FC0000003FFFFFFFFC07FC000000FFFFFF +FFFF1FFC000003FFFFFFFFFFFFFC00000FFFFFFFFFFFFFFC00003FFFF8001FFFFFFC0000 +7FFF000000FFFFFC0000FFF80000003FFFFC0001FFE00000000FFFFC0003FFC000000003 +FFFC0007FF8000000001FFFC000FFF0000000000FFFC000FFF00000000007FFC001FFE00 +000000003FFC001FFE00000000001FFC003FFC00000000001FFC003FFC00000000001FFC +007FFC00000000000FFC007FFC00000000000FFC007FFC00000000000FFC007FFC000000 +000007FC00FFFE000000000007FC00FFFE000000000007FC00FFFF000000000007FC00FF +FF000000000007FC00FFFF800000000007FC00FFFFE00000000003F800FFFFF000000000 +000000FFFFFC00000000000000FFFFFF00000000000000FFFFFFF00000000000007FFFFF +FF8000000000007FFFFFFFFC00000000007FFFFFFFFFF0000000003FFFFFFFFFFF800000 +003FFFFFFFFFFFF00000001FFFFFFFFFFFFE0000001FFFFFFFFFFFFF8000000FFFFFFFFF +FFFFE000000FFFFFFFFFFFFFF0000007FFFFFFFFFFFFFC000003FFFFFFFFFFFFFE000001 +FFFFFFFFFFFFFF800000FFFFFFFFFFFFFFC000007FFFFFFFFFFFFFE000001FFFFFFFFFFF +FFF000000FFFFFFFFFFFFFF8000003FFFFFFFFFFFFF8000000FFFFFFFFFFFFFC0000003F +FFFFFFFFFFFE0000000FFFFFFFFFFFFE00000001FFFFFFFFFFFF000000001FFFFFFFFFFF +00000000007FFFFFFFFF800000000003FFFFFFFF8000000000000FFFFFFFC00000000000 
+00FFFFFFC00000000000003FFFFFC00000000000000FFFFFE07F000000000003FFFFE0FF +800000000001FFFFE0FF8000000000007FFFE0FF8000000000003FFFE0FFC00000000000 +3FFFE0FFC000000000001FFFE0FFC000000000000FFFE0FFE000000000000FFFE0FFE000 +000000000FFFE0FFE0000000000007FFE0FFF0000000000007FFE0FFF0000000000007FF +C0FFF8000000000007FFC0FFF8000000000007FFC0FFFC000000000007FFC0FFFC000000 +000007FF80FFFE00000000000FFF80FFFF00000000000FFF80FFFF00000000000FFF00FF +FF80000000001FFF00FFFFC0000000001FFE00FFFFE0000000003FFE00FFFFF000000000 +7FFC00FFFFFC00000000FFF800FFFFFE00000003FFF000FFFFFF80000007FFE000FFFFFF +F000003FFFC000FFFFFFFF0007FFFF8000FFF9FFFFFFFFFFFF0000FFF07FFFFFFFFFFC00 +00FFE01FFFFFFFFFF00000FF8007FFFFFFFFC00000FF0001FFFFFFFF000000FE00003FFF +FFF80000007C000003FFFE000000004B5F78DD5C>I<0000000FF80000000000001FF000 +00007FFFFFF800000000FFFFFFF00000FFFFFFFFF8000001FFFFFFFFF00000FFFFFFFFF8 +000001FFFFFFFFF00000FFFFFFFFF8000001FFFFFFFFF00000FFFFFFFFF8000001FFFFFF +FFF00000FFFFFFFFF8000001FFFFFFFFF00000FFFFFFFFF8000001FFFFFFFFF00000FFFF +FFFFF8000001FFFFFFFFF00000FFFFFFFFF8000001FFFFFFFFF00000FFFFFFFFF8000001 +FFFFFFFFF00000003FFFFFF8000000007FFFFFF000000007FFFFF8000000000FFFFFF000 +000007FFFFF8000000000FFFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8 +0000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FF +FFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003 +FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8000000 +0007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000 +000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8 +0000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FF +FFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003 +FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8000000 +0007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000 +000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8 
+0000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FF +FFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003 +FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8000000 +0007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000 +000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8 +0000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FF +FFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003 +FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8000000 +0007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000 +000003FFFFF80000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF8 +0000000007FFFFF000000003FFFFF80000000007FFFFF000000003FFFFF80000000007FF +FFF000000003FFFFF8000000000FFFFFF000000003FFFFF8000000000FFFFFF000000003 +FFFFF8000000000FFFFFF000000003FFFFF8000000000FFFFFF000000003FFFFF8000000 +001FFFFFF000000003FFFFF8000000001FFFFFF000000003FFFFF8000000003FFFFFF000 +000003FFFFF8000000003FFFFFF000000001FFFFF8000000003FFFFFF000000001FFFFF8 +000000007FFFFFF000000001FFFFF800000000FFFFFFF000000001FFFFF800000000FFFF +FFF000000000FFFFF800000001FBFFFFF000000000FFFFFC00000003FBFFFFF000000000 +7FFFFC00000007F3FFFFF0000000007FFFFC0000000FE3FFFFF8000000003FFFFE000000 +1FE3FFFFF8000000003FFFFE0000007FC3FFFFFF000000001FFFFF800000FF83FFFFFFFF +C000000FFFFFC00003FF03FFFFFFFFC0000007FFFFFC003FFE03FFFFFFFFC0000003FFFF +FFFFFFFC03FFFFFFFFC0000000FFFFFFFFFFF803FFFFFFFFC00000007FFFFFFFFFE003FF +FFFFFFC00000001FFFFFFFFF8003FFFFFFFFC000000003FFFFFFFE0003FFFFFFFFC00000 +00007FFFFFF80003FFFFFFFFC00000000001FFFFC00003FFFF000000725E77DC81>117 +D E /Fd 12 119 df<0E003F807FC0FFE0FFE0FFE0FFE0FFE07FC03F800E000B0B6C8A33 +>46 D<000007F8000000003FFF00000001FFFF80000003FFFFE000000FFFFFF000001FFF +FFF800003FF80FF800007FE003FC0000FF8001FC0001FE0000FE0003FC00007E0003F800 +7E7F0007F001FFBF000FE007FFFF000FE00FFFFF801FC01FFFFF801F803FFFFF803F803F 
+C3FF803F007F00FF803F00FE007FC07F00FE007FC07E00FC003FC07E01FC003FC07E01F8 +001FC0FE01F8001FC0FC03F8001FC0FC03F0000FC0FC03F0000FC0FC03F0000FC0FC03F0 +000FC0FC03F0000FC0FC03F0000FC0FC03F0000FC0FC03F0000FC0FC03F0000FC0FC03F8 +001FC0FE01F8001F807E01F8001F807E01FC003F807E00FC003F007F00FE007F003F00FE +007F003F007F00FE003F803FC3FC001F803FFFFC001FC01FFFF8000FE00FFFF0000FE007 +FFE00007F001FF800003F8007E000003FC00000F8001FE00001FC000FF80003FC0007FE0 +00FFC0003FF803FF80001FFFFFFF00000FFFFFFE000003FFFFF8000001FFFFF00000003F +FF8000000007FC00002A3D7CBC33>64 D<3FF8000000007FFC00000000FFFC00000000FF +FC000000007FFC000000003FFC0000000000FC0000000000FC0000000000FC0000000000 +FC0000000000FC0000000000FC0000000000FC0000000000FC0000000000FC0000000000 +FC0000000000FC0000000000FC0000000000FC01FC000000FC0FFF800000FC3FFFE00000 +FCFFFFF00000FDFFFFF80000FFFFFFFC0000FFFC07FE0000FFF001FF0000FFC000FF0000 +FF80007F8000FF00003FC000FF00001FC000FE00000FC000FE00000FE000FC000007E000 +FC000007E000FC000007F000FC000003F000FC000003F000FC000003F000FC000003F000 +FC000003F000FC000003F000FC000003F000FC000003F000FC000007F000FC000007F000 +FE000007E000FE000007E000FE00000FE000FF00000FC000FF00001FC000FF80003F8000 +FF80007F8000FFC000FF0000FFF003FE0000FFF80FFE0000FFFFFFFC0000FDFFFFF80000 +FCFFFFE00000FC7FFFC000007C1FFF0000003803F800002C3D7EBC33>98 +D<0000FF80000007FFF000001FFFFC00007FFFFF0000FFFFFF8001FFFFFFC003FF80FFE0 +07FC001FE00FF00007F00FE00003F81FC00003F83F800001F83F800001FC7F000000FC7E +000000FC7E000000FE7E0000007EFE0000007EFFFFFFFFFEFFFFFFFFFEFFFFFFFFFEFFFF +FFFFFEFFFFFFFFFEFFFFFFFFFCFC00000000FE000000007E000000007E000000007F0000 +00003F000000003F8000003C1FC000007E1FC000007E0FF00000FE07F80001FE07FE0003 +FC03FFC01FF801FFFFFFF8007FFFFFF0003FFFFFC0000FFFFF800003FFFE0000007FF000 +272B7BAA33>101 D<000000000FC00000FE007FF00007FFC1FFF8001FFFF7FFF8003FFF +FFFFFC007FFFFFFFFC00FFFFFFE1FC01FF01FF80F801FC007F007003F8003F800007F000 +1FC00007E0000FC00007E0000FC0000FE0000FE0000FC00007E0000FC00007E0000FC000 
+07E0000FC00007E0000FC00007E0000FC00007E0000FE0000FE00007E0000FC00007E000 +0FC00007F0001FC00003F8003F800001FC007F000001FF01FF000001FFFFFE000003FFFF +FC000003FFFFF8000007FFFFF0000007C7FFC0000007C0FE00000007C00000000007E000 +00000007E00000000007E00000000003F80000000003FFFFFC000001FFFFFFC00001FFFF +FFF80003FFFFFFFC000FFFFFFFFF000FFFFFFFFF801FF00003FF803FC000007FC07F8000 +001FE07F0000000FE07E00000007E0FE00000007F0FC00000003F0FC00000003F0FC0000 +0003F0FC00000003F0FC00000003F0FE00000007F07F0000000FE07F8000001FE03FE000 +007FC01FF80001FF800FFF000FFF0007FFFFFFFE0003FFFFFFFC0000FFFFFFF000003FFF +FFC000000FFFFF00000000FFF000002E437DAB33>103 D<0001C000000007F000000007 +F00000000FF80000000FF80000000FF800000007F000000007F000000001C00000000000 +000000000000000000000000000000000000000000000000000000000000000000000000 +00000000000000000000003FFFF000007FFFF800007FFFF800007FFFF800007FFFF80000 +3FFFF800000001F800000001F800000001F800000001F800000001F800000001F8000000 +01F800000001F800000001F800000001F800000001F800000001F800000001F800000001 +F800000001F800000001F800000001F800000001F800000001F800000001F800000001F8 +00000001F800000001F800000001F800000001F800000001F800000001F800000001F800 +000001F800000001F800000001F800007FFFFFFF80FFFFFFFFC0FFFFFFFFE0FFFFFFFFE0 +FFFFFFFFC07FFFFFFF80233E78BD33>105 D<3FFFF800007FFFFC0000FFFFFC0000FFFF +FC00007FFFFC00003FFFFC00000000FC00000000FC00000000FC00000000FC00000000FC +00000000FC00000000FC00000000FC00000000FC00000000FC00000000FC00000000FC00 +000000FC00000000FC00000000FC00000000FC00000000FC00000000FC00000000FC0000 +0000FC00000000FC00000000FC00000000FC00000000FC00000000FC00000000FC000000 +00FC00000000FC00000000FC00000000FC00000000FC00000000FC00000000FC00000000 +FC00000000FC00000000FC00000000FC00000000FC00000000FC00000000FC00000000FC +00000000FC00000000FC00000000FC00000000FC00000000FC00000000FC00000000FC00 +000000FC00003FFFFFFFF07FFFFFFFF8FFFFFFFFFCFFFFFFFFFC7FFFFFFFF83FFFFFFFF0 +263D7ABC33>108 D<3FF803FC00007FFC1FFF0000FFFC7FFFC000FFFCFFFFE0007FFDFF 
+FFF0003FFFFFFFF00000FFFC07F80000FFF003F80000FFC001F80000FF8001FC0000FF00 +00FC0000FF0000FC0000FE0000FC0000FE0000FC0000FE0000FC0000FC0000FC0000FC00 +00FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC00 +00FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC00 +00FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC0000FC00 +00FC0000FC0000FC0000FC0000FC003FFFF01FFFF07FFFF83FFFF8FFFFFC7FFFFCFFFFFC +7FFFFC7FFFF83FFFF83FFFF01FFFF02E2B7EAA33>110 D<0001FE0000000FFFC000003F +FFF000007FFFF80001FFFFFE0003FFFFFF0007FF03FF8007F8007F800FF0003FC01FE000 +1FE01FC0000FE03F800007F03F000003F07F000003F87E000001F87E000001F87E000001 +F8FC000000FCFC000000FCFC000000FCFC000000FCFC000000FCFC000000FCFC000000FC +FC000000FCFE000001FC7E000001F87E000001F87F000003F87F000003F83F800007F03F +800007F01FC0000FE01FE0001FE00FF0003FC007FC00FF8007FF03FF8003FFFFFF0001FF +FFFE00007FFFF800003FFFF000000FFFC0000001FE0000262B7AAA33>I<7FFF0007FC00 +FFFF803FFF00FFFF80FFFFC0FFFF83FFFFC0FFFF87FFFFE07FFF8FFFFFE0001F9FF81FE0 +001FBFE01FE0001FFF800FC0001FFF000780001FFC000000001FF8000000001FF8000000 +001FF0000000001FE0000000001FE0000000001FC0000000001FC0000000001FC0000000 +001FC0000000001F80000000001F80000000001F80000000001F80000000001F80000000 +001F80000000001F80000000001F80000000001F80000000001F80000000001F80000000 +001F80000000001F80000000001F80000000001F80000000001F80000000001F80000000 +7FFFFFFC0000FFFFFFFE0000FFFFFFFE0000FFFFFFFE0000FFFFFFFE00007FFFFFFC0000 +2B2B7CAA33>114 D<0001C00000000003E00000000007E00000000007E00000000007E0 +0000000007E00000000007E00000000007E00000000007E00000000007E00000000007E0 +0000000007E00000003FFFFFFFFC007FFFFFFFFE00FFFFFFFFFE00FFFFFFFFFE00FFFFFF +FFFE007FFFFFFFFC000007E00000000007E00000000007E00000000007E00000000007E0 +0000000007E00000000007E00000000007E00000000007E00000000007E00000000007E0 +0000000007E00000000007E00000000007E00000000007E00000000007E00000000007E0 +0000000007E00000000007E00000000007E00000000007E00000000007E0000F000007E0 
+001F800007E0001F800007E0001F800007E0001F800007E0001F800007E0003F800007F0 +003F000003F800FF000003FE03FE000001FFFFFE000001FFFFFC000000FFFFF80000007F +FFE00000001FFF8000000003FC000029377EB633>116 D<3FFFC00FFFF07FFFE01FFFF8 +FFFFF03FFFFCFFFFF03FFFFC7FFFE01FFFF83FFFC00FFFF001F800007E0001FC0000FE00 +00FC0000FC0000FC0000FC0000FE0001FC00007E0001F800007E0001F800007F0003F800 +003F0003F000003F0003F000003F8007F000001F8007E000001F8007E000001FC00FE000 +000FC00FC000000FC00FC000000FC00FC0000007E01F80000007E01F80000007E01F8000 +0007F03F80000003F03F00000003F03F00000003F87F00000001F87E00000001F87E0000 +0001FCFE00000000FCFC00000000FCFC00000000FFFC000000007FF8000000007FF80000 +00007FF8000000003FF0000000003FF0000000001FE0000000000FC000002E2B7EAA33> +118 D E /Fe 14 117 df<1C7FFFFFFFFFFE380808788716>46 D<00000000E000000000 +01E00000000001F00000000003F00000000003F00000000007F0000000000FF000000000 +0FF0000000001FF0000000001FF0000000003BF0000000007BF00000000073F000000000 +E3F000000000E3F800000001C1F800000003C1F80000000381F80000000701F800000007 +01F80000000E01F80000000E01F80000001C01F80000003C01F80000003801F800000070 +01FC0000007000FC000000E000FC000001E000FC000001C000FC000003FFFFFC000003FF +FFFC0000070000FC0000070000FC00000E0000FC00001E0000FC00001C0000FC00003800 +00FE00003800007E00007000007E0000F000007E0001E000007E0001E000007E0003E000 +007E000FF00000FF00FFFC001FFFF0FFFC001FFFF02C2F7CAE35>65 +D<000001FE003000000FFFC07000003F01E0F00000F80079F00003E0003DF00007C0001F +E0001F80000FE0003F00000FE0007E000007E000FC000007C001F8000007C003F0000007 +C003E0000007C007E0000007800FC0000007800FC0000007801F80000007801F80000007 +003F00000000003F00000000007F00000000007E00000000007E00000000007E00000000 +00FE0000000000FC0000000000FC0000000000FC0000000000FC0000000000FC00000000 +00FC0000003800F80000007800F80000007000FC0000007000FC000000F0007C000000E0 +007C000001E0007C000003C0003E00000380003E00000700001F00000E00000F80003C00 +0007C00078000003E001F0000000F80FC00000007FFF000000000FF80000002C2F75AD33 +>67 
D<001FFF0000000FFF001FFF0000001FFF0000FF0000003FC00000FF0000003FC000 +00FF0000007F800000FF000000EF800001EF800000FF800001EF800001DF000001CF8000 +01DF000001CF8000039F000003CF8000073F000003CF8000073E0000038F80000E3E0000 +038F80001C3E0000078F80001C7E00000787C000387C00000707C000387C00000707C000 +707C00000F07C000E0FC00000F07C000E0F800000E07C001C0F800000E07C00380F80000 +1E07C00381F800001E07C00701F000001C03E00701F000001C03E00E01F000003C03E01C +03F000003C03E01C03E000003803E03803E000003803E03803E000007803E07007E00000 +7803E0E007C000007001F0E007C000007001F1C007C00000F001F3800FC00000F001F380 +0F800000E001F7000F800000E001F7000F800001E001FE001F800001E001FC001F000001 +C001FC001F000003E000F8001F00000FF000F8003F0000FFFE00F00FFFF800FFFE00E00F +FFF800402D7BAC40>77 D<00000FF00C00003FFC1C0000F80F3C0003C007BC00078003FC +000F0001F8001E0001F8003C0000F8003C0000F800780000F000780000F000780000F000 +F80000F000F80000E000F80000E000F800000000FC00000000FE00000000FF800000007F +F80000007FFF8000003FFFE000001FFFF8000007FFFC000001FFFE0000001FFE00000001 +FF000000007F000000003F000000001F000000001F000000000F001C00000F001C00000F +001C00000F001C00001F003C00001E003C00001E003C00003C003C00003C007E00007800 +7E0000F0007F0001E0007F8003C000F1F01F0000E07FFC0000C00FF00000262F7BAD28> +83 D<0FFFFFFFFFF00FFFFFFFFFF01FE00FE007F01F000FE001F01E000FC001E03C000F +C000E03C001FC000E038001FC000E078001F8001E078001F8001E070003F8001C0F0003F +8001C0E0003F0001C0E0003F0001C0E0007F0001C000007F00000000007E00000000007E +0000000000FE0000000000FE0000000000FC0000000000FC0000000001FC0000000001FC +0000000001F80000000001F80000000003F80000000003F80000000003F00000000003F0 +0000000007F00000000007F00000000007E00000000007E0000000000FE0000000000FE0 +000000000FC0000000000FC0000000001FC0000000001FC0000000001F80000000001F80 +000000003F800000003FFFFF8000003FFFFF8000002C2D74AC33>I<0007C000001FF000 +007C39C000F81FC001F01FC003E00FC007C00FC00FC00FC01F800F801F800F803F000F80 +3F001F807F001F007E001F007E001F007E003F00FE003E00FC003E00FC003E00FC007E18 
+FC007C38F8007C38F8007C38F800FC387801FC787C03F8707C03F8703C0F78E01E1E3DE0 +0FF81FC003E00F001D1F799D24>97 D<000001F0000007FC00000F9E00001F1E00001E3E +00003E7E00003E7E00003E7E00007C3800007C0000007C0000007C000000FC000000F800 +0000F8000000F8000000F8000000F800007FFFF0007FFFF00001F0000001F0000001F000 +0003F0000003E0000003E0000003E0000003E0000007E0000007C0000007C0000007C000 +0007C000000FC000000F8000000F8000000F8000000F8000001F8000001F0000001F0000 +001F0000001F0000003F0000003E0000003E0000003E0000003E0000007E0000007C0000 +007C0000007C000038F80000FCF80000FCF80000FCF00000FDF00000F1E00000F3C00000 +7F8000001E0000001F3D81AE16>102 D<001F000003FF000003FF0000003F0000003F00 +00003E0000003E0000007E0000007E0000007C0000007C000000FC000000FC000000F800 +0000F8000001F8000001F87E0001F3FF8001F787C003FE03E003FC03E003F803E003F803 +E007F003E007E003E007E003E007C003E00FC007E00FC007C00F8007C00F8007C01F800F +C01F800F801F000F801F001F803F001F063F001F0E3E003F0E3E003E0E7E003E1E7E007E +1C7C007C1C7C003C38FC003C38FC003C70F8001FE0700007801F2F7BAD24>104 +D<07801FC0001FE07FF0003DF1E0F80038F3C0780078FF007C0070FE007C0070FE007C00 +F1FC007C00E1F8007C00E1F8007C00E1F0007C00C3F000FC0003F000F80003E000F80003 +E000F80007E001F80007E001F00007C001F00007C003F0000FC003E0C00FC003E1C00F80 +07E1C00F8007C1C01F8007C3C01F800FC3801F000F83801F000787003F000787003F0007 +8E003E0003FC001C0000F000221F7A9D28>110 D<0003F800000FFE00003E0F8000FC07 +C001F003C003E003E007E003E00FC001F00F8001F01F8001F03F0001F03F0003F07F0003 +F07E0003F07E0003F07E0007F0FE0007E0FC0007E0FC0007E0FC000FC0FC000FC0FC001F +80FC001F807C003F007C003E007C007C003E00F8001E01F0000F07C00007FF000001FC00 +001C1F799D24>I<07803E001FE0FF803CF3C1C038F781E078FF07E070FE07E070FC07E0 +F1FC07E0E1F80380E1F80000E1F00000C3F0000003F0000003E0000003E0000007E00000 +07E0000007C0000007C000000FC000000FC000000F8000000F8000001F8000001F800000 +1F0000001F0000003F0000003F0000003E0000001C0000001B1F7A9D1E>114 +D<000FC0007FF000F03C01E01C03C01E07801E07803E07803E0F803C0F80180FC0000FF8 
+000FFF0007FFC003FFE001FFF0007FF00007F00003F00001F07800F0FC00F0FC00F0FC01 +F0F801E0E001E0E003C0F00780781F001FFC0007F000171F7A9D1D>I<001C00003E0000 +3E00007E00007E00007C00007C0000FC0000FC0000F80000F80001F80001F800FFFFE0FF +FFE003F00003F00003E00003E00007E00007E00007C00007C0000FC0000FC0000F80000F +80001F80001F80001F00001F00003F00C03F01C03E01C03E03C03E03803E07803C07003C +0E003C1E001E3C000FF00007C000132B7AA918>I E /Ff 18 120 +df<0000001FFC00000000000001FFFFC000000000000FF007F800000000003F8000FE00 +000000007E00003F0000000001F800000FC000000003F0000007E00000000FE0000003F8 +0000001FC0000001FC0000003F80000000FE0000007F000000007F000000FE000000003F +800000FC000000001F800001FC000000001FC00003F8000000000FE00003F8000000000F +E00007F00000000007F0000FF00000000007F8000FE00000000003F8001FE00000000003 +FC001FE00000000003FC001FC00000000001FC003FC00000000001FE003FC00000000001 +FE003FC00000000001FE007FC00000000001FF007F800000000000FF007F800000000000 +FF007F800000000000FF00FF800000000000FF80FF800000000000FF80FF800000000000 +FF80FF800000000000FF80FF800000000000FF80FF800000000000FF80FF800000000000 +FF80FF800000000000FF80FF800000000000FF80FF800000000000FF80FF800000000000 +FF80FF800000000000FF807F800000000000FF007FC00000000001FF007FC00000000001 +FF007FC00000000001FF007FC00000000001FF003FC00000000001FE003FE00000000003 +FE003FE00000000003FE001FE00000000003FC001FF00000000007FC000FF00000000007 +F8000FF00000000007F80007F8000000000FF00007F8000000000FF00003FC000000001F +E00001FC000000001FC00000FE000000003F800000FF000000007F8000007F000000007F +0000003F80000000FE0000001FC0000001FC0000000FE0000003F800000003F0000007E0 +00000001FC00001FC0000000007E00003F00000000003F8000FE00000000000FF007F800 +0000000001FFFFC00000000000001FFC0000000041467BC44C>79 +D82 D<0001FF000300000FFFE00300003FFFF80700 +00FE00FC070001F8001F0F0003E000079F0007C00001DF000F800000FF001F0000007F00 +1E0000003F003E0000003F007E0000001F007C0000001F007C0000000F00FC0000000F00 
+FC0000000700FC0000000700FC0000000700FC0000000700FE0000000300FE0000000300 +FE0000000300FF00000003007F80000000007FC0000000003FE0000000003FF000000000 +1FFE000000001FFFE00000000FFFFE00000007FFFFE0000003FFFFFC000001FFFFFF0000 +00FFFFFFC000003FFFFFE000000FFFFFF0000001FFFFF80000001FFFFC00000001FFFE00 +0000001FFF0000000003FF0000000000FF80000000007F80000000003FC0000000003FC0 +000000001FC0000000000FE0400000000FE0C00000000FE0C000000007E0C000000007E0 +C000000007E0C000000007E0E000000007E0E000000007E0E000000007C0F000000007C0 +F00000000FC0F80000000F80F80000000F80FC0000001F00FE0000001E00FF0000003E00 +FBC000007C00F9F00000F800F07C0003F000E03FC00FE000E00FFFFF8000C003FFFE0000 +C0003FF000002B467BC436>I<0007FC000000003FFF80000000F80FC0000001C001F000 +00030000F800000600007C00000C00003E00000F80003F00001FC0001F00001FE0001F80 +001FE0001F80001FE0000FC0001FE0000FC0000FC0000FC0000780000FC0000000000FC0 +000000000FC0000000000FC0000000000FC000000007FFC0000000FFFFC000000FFE0FC0 +00003FE00FC00000FF000FC00001FC000FC00007F8000FC0000FE0000FC0001FE0000FC0 +001FC0000FC0003F80000FC0007F80000FC0007F80000FC060FF00000FC060FF00000FC0 +60FF00000FC060FF00001FC060FF00001FC060FF00001FC0607F000037C0607F800037C0 +603F800063C0401FC000C3E0C00FE00383F1C007F80F01FF8001FFFC00FF00001FF0003C +002B2E7CAC31>97 D<03F800000000FFF800000000FFF800000000FFF80000000007F800 +00000003F80000000001F80000000001F80000000001F80000000001F80000000001F800 +00000001F80000000001F80000000001F80000000001F80000000001F80000000001F800 +00000001F80000000001F80000000001F80000000001F80000000001F80000000001F800 +00000001F80000000001F80000000001F803FC000001F80FFF800001F83C07E00001F870 +00F80001F8C0007C0001F980003E0001FB00001F0001FE00000F8001FC00000FC001FC00 +0007E001F8000007F001F8000003F001F8000003F801F8000003F801F8000003FC01F800 +0001FC01F8000001FC01F8000001FC01F8000001FE01F8000001FE01F8000001FE01F800 +0001FE01F8000001FE01F8000001FE01F8000001FE01F8000001FE01F8000001FE01F800 +0001FC01F8000001FC01F8000003FC01F8000003FC01F8000003F801F8000003F801F800 
+0007F001F8000007E001FC000007E001FC00000FC001F600001F8001F300001F0001E300 +003E0001E1C0007C0001C0E001F80001C03C07E00001801FFF8000000003FC00002F467D +C436>I<00007F80000003FFF000000FC07C00003E000E00007C00030000F800018001F0 +0000C003F00007C007E0000FE00FC0001FE00FC0001FE01FC0001FE03F80001FE03F8000 +0FC03F800007807F800000007F000000007F00000000FF00000000FF00000000FF000000 +00FF00000000FF00000000FF00000000FF00000000FF00000000FF00000000FF00000000 +7F000000007F000000007F800000007F800000003F800000003F800000301FC00000301F +C00000300FC000006007E000006003F00000C001F000018000F8000180007C000700003E +000E00001FC078000007FFF0000000FF8000242E7DAC2B>I<0001FE00000007FFC00000 +1F03F000007C00F80000F8007C0001F0003E0003E0001F0007E0001F8007C0000F800FC0 +000FC01FC0000FC01F80000FE03F800007E03F800007E07F800007E07F000007F07F0000 +07F07F000007F0FF000007F0FF000007F0FFFFFFFFF0FFFFFFFFF0FF00000000FF000000 +00FF00000000FF00000000FF00000000FF000000007F000000007F000000007F00000000 +3F800000003F800000003F800000301F800000301FC00000300FC000006007E000006003 +E00000C001F000018000F8000300007C000600003E001C00000F80F8000003FFE0000000 +FF0000242E7DAC2B>101 D<00001FC000007FF80001F03C0003E03E0007807E000F80FF +001F00FF003F00FF003E007E007E007E007E001800FC000000FC000000FC000000FC0000 +00FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC0000 +00FC000000FC000000FC0000FFFFFF80FFFFFF80FFFFFF8000FC000000FC000000FC0000 +00FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC0000 +00FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC0000 +00FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC0000 +00FC000000FC000000FC000000FC000000FC000000FC000001FF00007FFFFE007FFFFE00 +7FFFFE0020467EC51E>I<0001FC001FC0000FFF807FE0003F07E1E1F0007C01F781F000 +F800FE01F001F0007C00E003E0003E000007E0003F000007E0003F00000FC0001F80000F +C0001F80001FC0001FC0001FC0001FC0001FC0001FC0001FC0001FC0001FC0001FC0001F +C0001FC0001FC0001FC0000FC0001F80000FC0001F800007E0003F000007E0003F000003 
+E0003E000001F0007C000000F800F8000001FC01F00000033F07E00000030FFF80000007 +01FC0000000600000000000600000000000600000000000E00000000000F000000000007 +000000000007000000000007800000000007E00000000003FFFFF8000001FFFFFF800000 +FFFFFFE000007FFFFFF80001FFFFFFFC0007C0000FFE000F000000FF001E0000003F003E +0000001F807C0000000F807C0000000FC0F800000007C0F800000007C0F800000007C0F8 +00000007C0F800000007C0F800000007C07C0000000F807C0000000F803E0000001F001F +0000003E000F8000007C0007E00001F80001F80007E000007F003F8000001FFFFE000000 +01FFE000002C417DAB31>I<03F800000000FFF800000000FFF800000000FFF800000000 +07F80000000003F80000000001F80000000001F80000000001F80000000001F800000000 +01F80000000001F80000000001F80000000001F80000000001F80000000001F800000000 +01F80000000001F80000000001F80000000001F80000000001F80000000001F800000000 +01F80000000001F80000000001F80000000001F801FE000001F80FFFC00001F81E03E000 +01F83001F00001F86000F80001F8C0007C0001F980007E0001FB00007E0001FE00003E00 +01FE00003F0001FC00003F0001FC00003F0001FC00003F0001F800003F0001F800003F00 +01F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001F800003F00 +01F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001F800003F00 +01F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001F800003F00 +01F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001F800003F00 +01F800003F0003FC00007F80FFFFF01FFFFEFFFFF01FFFFEFFFFF01FFFFE2F457DC436> +I<01E00003F00007F8000FFC000FFC000FFC000FFC0007F80003F00001E0000000000000 +0000000000000000000000000000000000000000000000000000000000000000000001F8 +00FFF800FFF800FFF80007F80003F80001F80001F80001F80001F80001F80001F80001F8 +0001F80001F80001F80001F80001F80001F80001F80001F80001F80001F80001F80001F8 +0001F80001F80001F80001F80001F80001F80001F80001F80001F80001F80001F80001F8 +0001F80001F80001F80003FC00FFFFF0FFFFF0FFFFF014437DC21B>I<03F801FE0000FF +F80FFFC000FFF81E03E000FFF83001F00007F86000F80003F8C0007C0001F980007E0001 +FB00007E0001FE00003E0001FE00003F0001FC00003F0001FC00003F0001FC00003F0001 
+F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001 +F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001 +F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001 +F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001F800003F0001 +F800003F0001F800003F0001F800003F0003FC00007F80FFFFF01FFFFEFFFFF01FFFFEFF +FFF01FFFFE2F2C7DAB36>110 D<00007F8000000003FFF00000000FC0FC0000003E001F +0000007C000F800000F80007C00001F00003E00003E00001F00007C00000F8000FC00000 +FC000FC00000FC001F8000007E003F8000007F003F8000007F003F0000003F007F000000 +3F807F0000003F807F0000003F807F0000003F80FF0000003FC0FF0000003FC0FF000000 +3FC0FF0000003FC0FF0000003FC0FF0000003FC0FF0000003FC0FF0000003FC0FF000000 +3FC0FF0000003FC07F0000003F807F0000003F807F8000007F803F8000007F003F800000 +7F001F8000007E001FC00000FE000FC00000FC000FE00001FC0007E00001F80003F00003 +F00001F80007E00000FC000FC000003E001F0000001FC0FE00000007FFF8000000007F80 +00002A2E7DAC31>I<03F007F0FFF01FF8FFF0383CFFF0607E07F0C0FF03F180FF01F180 +FF01F300FF01F6007E01F6003C01F6000001FC000001FC000001FC000001FC000001F800 +0001F8000001F8000001F8000001F8000001F8000001F8000001F8000001F8000001F800 +0001F8000001F8000001F8000001F8000001F8000001F8000001F8000001F8000001F800 +0001F8000001F8000001F8000001F8000001F8000001F8000003FE0000FFFFFC00FFFFFC +00FFFFFC00202C7DAB26>114 D<003FE01801FFFC1807C01E380F0003781C0001F83800 +00F8380000787000007870000038F0000038F0000018F0000018F0000018F8000018FC00 +00187E0000187F8000003FF000003FFF80001FFFF80007FFFE0003FFFF8001FFFFC0003F +FFE00003FFF000001FF8000003F8400000FCC000007CC000007EC000003EE000003EE000 +001EE000001EF000001EF000001EF000001CF800001CF800003CFC000038FE000070FB00 +00E0F18001C0E0F00F80C07FFE00C00FF0001F2E7DAC26>I<000C0000000C0000000C00 +00000C0000000C0000000C0000001C0000001C0000001C0000001C0000003C0000003C00 +00007C0000007C000000FC000001FC000003FC000007FC00001FFFFFF0FFFFFFF0FFFFFF +F000FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC00 
+0000FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC00 +0000FC000000FC000000FC000000FC000000FC000C00FC000C00FC000C00FC000C00FC00 +0C00FC000C00FC000C00FC000C00FC000C00FC000C007C0018007E0018007E0018003E00 +30001F0030000F80600007C0C00003FF8000007F001E3E7EBC26>I118 DI E /Fg 28 121 df<00001FF8000003FF +FF00000FFFFF80003FF00FC000FF800FE001FF001FE001FE001FE003FE003FF003FC001F +E003FC001FE003FC001FE003FC00078003FC00000003FC00000003FC00000003FC00FFF0 +FFFFFFFFF0FFFFFFFFF0FFFFFFFFF0FFFFFFFFF003FC000FF003FC000FF003FC000FF003 +FC000FF003FC000FF003FC000FF003FC000FF003FC000FF003FC000FF003FC000FF003FC +000FF003FC000FF003FC000FF003FC000FF003FC000FF003FC000FF003FC000FF003FC00 +0FF003FC000FF003FC000FF003FC000FF003FC000FF03FFFC0FFFF3FFFC0FFFF3FFFC0FF +FF3FFFC0FFFF282E7FAD2D>12 D<0001E0000003E000001FE00000FFE000FFFFE000FFFF +E000FFFFE000FF3FE000003FE000003FE000003FE000003FE000003FE000003FE000003F +E000003FE000003FE000003FE000003FE000003FE000003FE000003FE000003FE000003F +E000003FE000003FE000003FE000003FE000003FE000003FE000003FE000003FE000003F +E000003FE000003FE000003FE000003FE000003FE000003FE000003FE0007FFFFFF07FFF +FFF07FFFFFF07FFFFFF01C2C7AAB29>49 D<003FF00001FFFE0007FFFF801FFFFFE03F80 +FFF07F003FF87F001FFCFF800FFEFFC007FEFFC007FFFFC003FFFFC003FFFFC003FF7F80 +03FF3F0003FF000003FF000003FF000007FE000007FE00000FFC00000FF800001FF00000 +3FE000003FC000007F800000FE000001FC000003F0000007E000000FC00F001F000F003E +000F00FC001F01F0001E03E0001E07FFFFFE0FFFFFFE1FFFFFFE3FFFFFFE7FFFFFFEFFFF +FFFCFFFFFFFCFFFFFFFCFFFFFFFC202C7CAB29>I<000FFC0000007FFF800001FFFFE000 +03F81FF00007C00FF8000F8007FC000FE007FE001FF007FE001FF807FE001FF807FE001F +F807FE001FF807FE001FF807FE000FF00FFC0007E00FFC0000001FF80000001FF0000000 +3FE0000000FF8000003FFF0000003FFC0000003FFFC00000001FF00000000FFC00000007 +FE00000003FF00000003FF80000001FF80000001FFC01F0001FFC07FC001FFC0FFE001FF +C0FFE001FFC0FFE001FFC0FFE001FFC0FFE001FF80FFE003FF807FC003FF807F8007FF00 
+3F8007FE001FF01FFC000FFFFFF80007FFFFE00000FFFF8000001FFC0000222D7DAB29> +I<000000FC000000000000FC000000000001FE000000000001FE000000000003FF000000 +000003FF000000000003FF000000000007FF800000000007FF80000000000FFFC0000000 +000FFFC0000000000FFFC0000000001FFFE0000000001F7FE0000000003F7FF000000000 +3E3FF0000000007E3FF8000000007C1FF8000000007C1FF800000000FC1FFC00000000F8 +0FFC00000001F80FFE00000001F007FE00000001F007FE00000003F007FF00000003E003 +FF00000007E003FF80000007C001FF80000007C001FF8000000FC000FFC000000F8000FF +C000001FFFFFFFE000001FFFFFFFE000003FFFFFFFF000003FFFFFFFF000003E00003FF0 +00007E00003FF800007C00001FF80000FC00001FFC0000F800000FFC0000F800000FFC00 +01F800000FFE0001F0000007FE00FFFFC001FFFFFCFFFFC001FFFFFCFFFFC001FFFFFCFF +FFC001FFFFFC362F7DAE3D>65 DI<000003FF8001C000003FFFF803C00001FF +FFFE07C00007FFFFFF8FC0001FFF801FFFC0007FF80007FFC000FFE00001FFC001FFC000 +00FFC003FF0000007FC007FE0000003FC00FFE0000003FC01FFC0000001FC01FF8000000 +0FC03FF80000000FC03FF00000000FC07FF000000007C07FF000000007C07FF000000007 +C07FE00000000000FFE00000000000FFE00000000000FFE00000000000FFE00000000000 +FFE00000000000FFE00000000000FFE00000000000FFE00000000000FFE00000000000FF +E000000000007FE000000000007FF000000000007FF000000003C07FF000000003C03FF0 +00000003C03FF800000003C01FF800000007C01FFC00000007800FFE0000000F8007FE00 +00000F0003FF0000001F0001FFC000003E0000FFE00000FC00007FF80003F800001FFF80 +1FF0000007FFFFFFC0000001FFFFFF000000003FFFFC0000000003FFC0000032307CAE3B +>I76 +DII82 D<001FF8038000FFFF078003FFFFCF8007FF +FFFF801FF00FFF801FC001FF803F80007F807F00003F807E00001F807E00001F80FE0000 +0F80FE00000F80FE00000780FF00000780FF80000780FFC0000000FFF00000007FFF0000 +007FFFF000007FFFFF80003FFFFFE0001FFFFFF8000FFFFFFE0007FFFFFF0003FFFFFF80 +00FFFFFFC0001FFFFFC00001FFFFE000000FFFE0000000FFF00000003FF00000001FF000 +00000FF0F000000FF0F0000007F0F0000007F0F0000007F0F8000007F0F8000007E0FC00 +000FE0FE00000FC0FF00001FC0FFC0003F80FFFC00FF00FFFFFFFE00F9FFFFFC00F03FFF 
+F000E003FF800024307CAE2D>I<3FFFFFFFFFFF003FFFFFFFFFFF003FFFFFFFFFFF003F +FFFFFFFFFF007FE00FFC01FF807F800FFC007F807E000FFC001F807C000FFC000F807C00 +0FFC000F8078000FFC00078078000FFC00078078000FFC00078078000FFC000780F0000F +FC0003C0F0000FFC0003C0F0000FFC0003C0F0000FFC0003C000000FFC00000000000FFC +00000000000FFC00000000000FFC00000000000FFC00000000000FFC00000000000FFC00 +000000000FFC00000000000FFC00000000000FFC00000000000FFC00000000000FFC0000 +0000000FFC00000000000FFC00000000000FFC00000000000FFC00000000000FFC000000 +00000FFC00000000000FFC00000000000FFC00000000000FFC00000000000FFC00000000 +000FFC00000000000FFC000000003FFFFFFF0000003FFFFFFF0000003FFFFFFF0000003F +FFFFFF0000322D7DAC39>II<007FF8000003FFFF00000FFF +FFC0001FE01FF0001FF007F8001FF003FC001FF003FC001FF001FE000FE001FE0007C001 +FE00000001FE00000001FE000000FFFE00003FFFFE0001FFFFFE0007FFC1FE001FFC01FE +003FE001FE007FC001FE00FF8001FE00FF0001FE00FF0001FE00FF0001FE00FF0003FE00 +FF8003FE007FC007FF003FF03FFFF81FFFFEFFF807FFF87FF800FFC01FF8251E7E9D28> +97 D<0000000FC0000003FFC0000003FFC0000003FFC0000003FFC00000003FC0000000 +3FC00000003FC00000003FC00000003FC00000003FC00000003FC00000003FC00000003F +C00000003FC00000003FC0000FF83FC0007FFF3FC001FFFFFFC007FC07FFC00FF001FFC0 +1FE0007FC03FE0003FC03FC0003FC07FC0003FC07F80003FC07F80003FC0FF80003FC0FF +80003FC0FF80003FC0FF80003FC0FF80003FC0FF80003FC0FF80003FC0FF80003FC0FF80 +003FC07F80003FC07FC0003FC03FC0003FC03FC0007FC01FE000FFC00FF003FFC007FC0F +FFFC01FFFFBFFC00FFFE3FFC001FF03FFC262E7DAD2D>100 D<000FFC00007FFF8001FF +FFE007FC0FF00FF003F81FE001FC1FE000FC3FC000FE7FC0007E7F80007E7F80007FFF80 +007FFF80007FFFFFFFFFFFFFFFFFFFFFFFFFFF800000FF800000FF8000007F8000007FC0 +00007FC000003FC0000F1FE0000F0FF0001F07F8007E03FE01FC01FFFFF8007FFFE00007 +FF00201E7E9D25>I<0000FF800007FFE0001FFFF0007FC7F000FF0FF801FE0FF801FE0F +F803FC0FF803FC0FF803FC07F003FC008003FC000003FC000003FC000003FC000003FC00 +00FFFFFC00FFFFFC00FFFFFC00FFFFFC0003FC000003FC000003FC000003FC000003FC00 
+0003FC000003FC000003FC000003FC000003FC000003FC000003FC000003FC000003FC00 +0003FC000003FC000003FC000003FC000003FC000003FC000003FC000003FC00003FFFE0 +003FFFE0003FFFE0003FFFE0001D2E7EAD19>I<00000007C0001FF81FF000FFFF7FF803 +FFFFFDF807F81FF3F80FE007F3F81FE007F9F81FC003F8F03FC003FC003FC003FC003FC0 +03FC003FC003FC003FC003FC003FC003FC001FC003F8001FE007F8000FE007F00007F81F +E00007FFFFC0000FFFFF00000F1FF800001E000000001F000000001F000000001F800000 +001FFFFFC0000FFFFFF8000FFFFFFF0007FFFFFF8007FFFFFFC00FFFFFFFE03FFFFFFFE0 +7F00003FF0FE00000FF0FC000007F0FC000003F0FC000003F0FC000003F0FE000007F07F +00000FE03F80001FC01FF000FF8007FFFFFE0001FFFFF800001FFF8000252D7E9E29>I< +07C00FE01FF03FF83FF83FF83FF83FF81FF00FE007C000000000000000000000000003F0 +FFF0FFF0FFF0FFF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF0 +0FF00FF00FF00FF00FF00FF00FF0FFFFFFFFFFFFFFFF102F7CAE17>105 +D<03F0FFF0FFF0FFF0FFF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00F +F00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00F +F00FF00FF00FF00FF00FF00FF0FFFFFFFFFFFFFFFF102E7CAD17>108 +D<07E01FF000FFE07FFC00FFE1FFFF00FFE7E0FF80FFEF807F800FFE007FC00FFC003FC0 +0FFC003FC00FF8003FC00FF8003FC00FF0003FC00FF0003FC00FF0003FC00FF0003FC00F +F0003FC00FF0003FC00FF0003FC00FF0003FC00FF0003FC00FF0003FC00FF0003FC00FF0 +003FC00FF0003FC00FF0003FC00FF0003FC00FF0003FC0FFFF03FFFCFFFF03FFFCFFFF03 +FFFCFFFF03FFFC261E7C9D2D>110 D<0007FE0000007FFFE00001FFFFF80003FC03FC00 +07F000FE000FE0007F001FC0003F803FC0003FC03F80001FC07F80001FE07F80001FE0FF +80001FF0FF80001FF0FF80001FF0FF80001FF0FF80001FF0FF80001FF0FF80001FF0FF80 +001FF07F80001FE07F80001FE07FC0003FE03FC0003FC01FC0003F801FE0007F800FF000 +FF0003FC03FC0001FFFFF800007FFFE0000007FE0000241E7E9D29>I<03F03FE000FFF1 +FFFC00FFF7FFFF00FFFFC0FF80FFFE007FC00FFC003FE00FF8001FF00FF0001FF00FF000 +0FF80FF0000FF80FF0000FFC0FF00007FC0FF00007FC0FF00007FC0FF00007FC0FF00007 +FC0FF00007FC0FF00007FC0FF00007FC0FF0000FF80FF0000FF80FF0000FF80FF0001FF0 
+0FF8001FF00FFC003FE00FFE007FC00FFF81FF800FF7FFFE000FF1FFF8000FF07FC0000F +F00000000FF00000000FF00000000FF00000000FF00000000FF00000000FF00000000FF0 +0000000FF0000000FFFF000000FFFF000000FFFF000000FFFF000000262B7D9D2D>I<07 +E07E00FFE1FF80FFE3FFE0FFE78FE0FFEF1FF00FFE1FF00FFC1FF00FFC1FF00FF80FE00F +F807C00FF800000FF000000FF000000FF000000FF000000FF000000FF000000FF000000F +F000000FF000000FF000000FF000000FF000000FF000000FF000000FF00000FFFF8000FF +FF8000FFFF8000FFFF80001C1E7D9D22>114 D<01FF8E0007FFFE001FFFFE003F00FE00 +7C007E0078003E00F8001E00F8001E00FC001E00FF000000FFF00000FFFF80007FFFE000 +3FFFF8001FFFFE000FFFFE0003FFFF00003FFF800000FF8000003F80F0001F80F0000F80 +F8000F80F8000F80FC001F00FE001F00FF807E00FFFFFC00FBFFF000E0FF8000191E7D9D +20>I118 D120 +D E /Fh 1 1 df0 +D E /Fi 2 51 df<00E00001E00007E000FFE000F9E00001E00001E00001E00001E00001 +E00001E00001E00001E00001E00001E00001E00001E00001E00001E00001E00001E00001 +E00001E00001E00001E00001E00001E00001E00001E00001E00003F000FFFFC0FFFFC012 +217AA01E>49 D<01FC0007FF801C0FC03003E06001F06000F8F800F8FC00FCFC00FCFC00 +7C78007C3000FC0000FC0000F80000F80001F00003E00003C0000780000F00001E000038 +0000700000E00001C00C03800C0600180C00181800183FFFF87FFFF8FFFFF0FFFFF01621 +7CA01E>I E /Fj 6 117 df<00FFFE07FFF000FFFE07FFF00007C0003E000007C0003E00 +000F80007C00000F80007C00000F80007C00000F80007C00001F0000F800001F0000F800 +001F0000F800001F0000F800003E0001F000003E0001F000003E0001F000003FFFFFF000 +007FFFFFE000007C0003E000007C0003E000007C0003E00000F80007C00000F80007C000 +00F80007C00000F80007C00001F0000F800001F0000F800001F0000F800001F0000F8000 +03E0001F000003E0001F000003E0001F000007E0003F0000FFFE07FFF000FFFE07FFF000 +2C227CA132>72 D<1FFFFFFFFC1FFFFFFFFC1F003E007C3C003E001C38007C001C30007C +001C70007C001860007C00186000F80018C000F80018C000F80018C000F800180001F000 +000001F000000001F000000001F000000003E000000003E000000003E000000003E00000 +0007C000000007C000000007C000000007C00000000F800000000F800000000F80000000 
+0F800000001F000000001F000000001F000000003F0000001FFFFE00001FFFFC00002622 +7DA124>84 D<0038007C007C007800700000000000000000000000000000000007801FC0 +31E060E0E1E0C1E0C1E003C003C003C0078007800F000F040F061E0C1E0C1E181E300FE0 +07C00F227DA116>105 D<00F800000FF000000FF0000000F0000000F0000001E0000001 +E0000001E0000001E0000003C0000003C0000003C0000003C000000780000007803C0007 +80FE000781C3000F030F000F0E1F000F181F000F301E001EE00C001FE000001FFC00001E +7E00003C0F00003C0F80003C0781003C078180780F0300780F03007807860078078E00F0 +03FC006000F00019237CA221>107 D<003F8000FFE001C0700380700700F00700F00700 +4007C00007FE0003FF8001FFC0003FE00003F03000F07800F0F800E0F800E0F001C07007 +803FFF000FF80014157D941D>115 D<00700000F00000F00000F00000F00001E00001E0 +0001E00001E00003C000FFFF80FFFF8003C0000780000780000780000780000F00000F00 +000F00000F00001E00001E00001E01001E01803C03003C06003C0C001E38000FF00007C0 +00111F7D9E18>I E /Fk 6 107 df0 D<60000000C0E0000001C070000003C078000007803C00000F001E00001E000F +00003C00078000780003C000F00001E001E00000F003C0000078078000003C0F0000001E +1E0000000F3C00000007F800000003F000000001E000000003F000000007F80000000F3C +0000001E1E0000003C0F0000007807800000F003C00001E001E00003C000F00007800078 +000F00003C001E00001E003C00000F00780000078070000003C0E0000001C060000000C0 +222376A137>2 D<0001C000000000000001C000000000000001C0000000000000038000 +000000000003800000000000000380000000000000070000000000000007000000000000 +000E000000000000001E000000000000001C000000000000003800000000000000700000 +0000000001E000000000000003C00000000000000F800000000000007FFFFFFFFFFFFFF8 +FFFFFFFFFFFFFFFC7FFFFFFFFFFFFFF80F8000000000000003C000000000000001E00000 +0000000000700000000000000038000000000000001C000000000000001E000000000000 +000E00000000000000070000000000000007000000000000000380000000000000038000 +0000000000038000000000000001C000000000000001C000000000000001C00000000000 +3E237CA147>32 D<0001C000000E00000001C000000E00000001C000000E000000038000 
+000700000003800000070000000380000007000000070000000380000007000000038000 +000E00000001C000001E00000001E000001C00000000E000003800000000700000700000 +0000380001E0000000001E0003C0000000000F000F800000000007C07FFFFFFFFFFFFFF8 +FFFFFFFFFFFFFFFC7FFFFFFFFFFFFFF80F800000000007C003C0000000000F0001E00000 +00001E0000700000000038000038000000007000001C00000000E000001E00000001E000 +000E00000001C00000070000000380000007000000038000000380000007000000038000 +0007000000038000000700000001C000000E00000001C000000E00000001C000000E0000 +3E237CA147>36 D<7FFF800000FFFFF800007FFFFE000000007F0000000007C000000003 +E000000000F00000000078000000003C000000001C000000000E000000000E0000000007 +00000000070000000003800000000380000000038000000001C000000001C000000001C0 +FFFFFFFFC0FFFFFFFFC0FFFFFFFFC000000001C000000001C000000001C0000000038000 +000003800000000380000000070000000007000000000E000000000E000000001C000000 +003C000000007800000000F000000003E000000007C00000007F00007FFFFE0000FFFFF8 +00007FFF800000222B7AA52F>51 D106 D E /Fl 22 122 df<0001FC0000000FFF00 +00003E07800000F801C00401F001E00C03E000F00C07C000F80C0F8000F8181F80007C18 +1F00007C303F00007C303E00007C607E00007C607E00007CC07E00007CC0FC00007D80FC +00007F00FC00007F00FC00007E00FC00007C00FC00007C007C00007C007C0000FC003C00 +01FE003E00073E0C1E001E1E0C0F80F80E1803FFE007F000FF0003E0261D7D9C2D>11 +D<0000007E00000003FF8000000F81C000001C00E000003000F000006000F80000C000F8 +00018000F800030000F800030000F800060000F8000C0000F8000C0001F000180001F000 +180003E000180003C0003000078000301FEF0000303FFE0000303FFE0000601FEF000060 +00078000600007C000600007C000C00007C000C00003E000C00007E000C00007E0018000 +07E001800007E001800007E001800007E00300000FC00300000FC00300000FC00300001F +800700001F000700001F000780003E000780007C000CC00078000CC000F0000C7003E000 +0C3C0F8000180FFF00001803F80000180000000018000000003000000000300000000030 +0000000030000000006000000000600000000060000000006000000000C000000000C000 +000000C000000000253B7EAD28>I<387CFEFEFF7F3B030303030606060C0C1830604008 
+147A8614>59 D<00000000700000000000F00000000000F00000000001F00000000001F0 +0000000003F00000000007F00000000007F8000000000DF8000000001DF80000000019F8 +0000000031F80000000071F80000000061F800000000C1FC00000000C1FC0000000180FC +0000000300FC0000000300FC0000000600FC0000000E00FC0000000C00FC0000001800FE +00000038007E00000030007E00000060007E00000060007E000000C0007E00000180007E +000001FFFFFF000003FFFFFF00000700003F00000600003F00000C00003F00001C00003F +00001800003F00003000003F80003000003F80006000001F8000C000001F8000C000001F +8001C000001F8003C000001F800FC000003FC0FFF80003FFFCFFF80003FFFC2E2E7DAD35 +>65 D<001FFFFFFF00001FFFFFFFC00000FE0007F00000FC0001F80000FC0000FC0000FC +00007E0000FC00007E0001F800007F0001F800007F0001F800007F0001F800007F0003F0 +00007E0003F00000FE0003F00000FE0003F00001FC0007E00001F80007E00003F00007E0 +0007E00007E0001FC0000FC0007F00000FC003FC00000FFFFFF800000FFFFFFE00001F80 +003F80001F80000FC0001F80000FE0001F800007E0003F000007F0003F000007F0003F00 +0003F0003F000003F0007E000003F0007E000007F0007E000007F0007E000007E000FC00 +000FE000FC00000FC000FC00001FC000FC00003F8001F800007F0001F80000FE0001F800 +03F80003F8000FF000FFFFFFFFC000FFFFFFFC0000302D7CAC35>I<0000007FC0010000 +03FFF80300001FC03C0700007E00070F0001F800039E0003E00001BE000FC00000FE001F +8000007E003E0000007C007E0000003C00FC0000003C01F80000003C03F00000003803F0 +0000003807E0000000380FE0000000380FC0000000301FC0000000103F80000000003F80 +000000003F00000000007F00000000007F00000000007F0000000000FE0000000000FE00 +00000000FE0000000000FE0000000000FE0000000000FE0000000000FE0000000100FE00 +00000180FE00000003007E00000003007E00000006007E00000006003E0000000C003F00 +000018001F00000018001F00000030000F800000E00007C00001C00003E00007000000F8 +001E0000007E00F80000001FFFE000000003FF000000302F7CAD32>I<001FFFF000FFFE +001FFFF000FFFE0000FF80001FE000007F00001F0000007F00001E0000003F8000180000 +003F8000300000001F8000600000001FC000C00000000FC001800000000FE00300000000 +07E0060000000007F00C0000000007F0180000000003F8300000000003F8600000000001 
+FCC00000000001FD800000000000FF000000000000FE0000000000007F0000000000007F +0000000000003F8000000000007F800000000000DF8000000000019FC000000000030FC0 +00000000060FE0000000000E07E0000000001C07F0000000003807F0000000007003F800 +000000E003F800000001C001FC000000038001FC000000070000FE0000000E0000FE0000 +001C00007F0000003800007F0000007000003F800000E000003F800003E000003F80000F +E000007FC000FFFC0003FFFE00FFFC0003FFFE00372D7DAC3A>88 +D<0007E000001FF0C0007C19E000F00FE001E007E003C007E007C007E00F8007C01F8007 +C01F0007C03F0007C03E000F807E000F807E000F807E000F80FC001F00FC001F00FC001F +00FC001F02FC003E06FC003E06FC003E067C007E067C007E0C3C01FE0C1C03BE180E0E1E +3007FC0FE001F003C01F1D7D9C25>97 D<0000001F000003FF000003FF0000003F000000 +3E0000003E0000003E0000003E0000007C0000007C0000007C0000007C000000F8000000 +F8000000F8000000F8000001F00007E1F0001FF1F0007C19F000F00FE001E007E003C007 +E007C007E00F8007C01F8007C01F0007C03F0007C03E000F807E000F807E000F807E000F +80FC001F00FC001F00FC001F00FC001F02FC003E06FC003E06FC003E067C007E067C007E +0C3C01FE0C1C03BE180E0E1E3007FC0FE001F003C0202E7DAD24>100 +D<0003F800001FFE00007E070000F8038001E0018003C001C007C001C00F8003801F8003 +803F0007003F001E007E00FC007FFFF0007FFF00007E0000007C000000FC000000FC0000 +007C0000007C0000007C0000007C0000403C0000C03C0001801E0003000E001E000780FC +0003FFE00000FF00001A1D7D9C21>I<000300000F80000FC0001F80000F800006000000 +0000000000000000000000000000000000000000000000000000000003E00007F8000C3C +00183C00303E00603E00603E00607C00C07C00C07C0000F80000F80001F00001F00001F0 +0003E00003E00007C00007C04007C0C00F80C00F80C00F81801F01800F03000F0600070C +0003F80001F000122D7EAC18>105 D<003E000007FE000007FE0000007E0000007C0000 +007C0000007C0000007C000000F8000000F8000000F8000000F8000001F0000001F00000 +01F0000001F0000003E0000003E001E003E007F003E01E1807C0383C07C0607C07C0C0FC +07C180FC0F8300F80F8600700F8C00000FB800001FF000001FF000001FFF00001F3F8000 +3E07C0003E03E0003E01F0003E01F0087C01F0187C01F0187C01F0187C01F030F801F030 
+F801F060F800F060F800F0C0F0007F8060001F001E2E7CAD25>107 +D<078007F0007E00000FE01FFC03FF800018F0781E0783C0003078E00F0C01E000607980 +0F9801F000607F000FB001F000607E000FE001F000C0FE000FC001F000C0FC000FC001F0 +00C0F8000F8001F00000F8000F8001F00001F0001F0003E00001F0001F0003E00001F000 +1F0003E00001F0001F0007C00003E0003E0007C00003E0003E0007C00003E0003E000F80 +0003E0003E000F808007C0007C000F818007C0007C001F018007C0007C001F018007C000 +7C001F03000F8000F8003E03000F8000F8001E06000F8000F8001E0C000F8000F8000E18 +000F0000F00007F000060000600003E000391D7E9C3E>109 D<07C007E0000FE03FF800 +18F0783C003078C01E003079801F00607F001F00607E001F00C0FC001F00C0FC001F00C0 +F8001F0000F8001F0001F0003E0001F0003E0001F0003E0001F0007C0003E0007C0003E0 +007C0003E000F80003E000F80807C000F81807C001F01807C001F01807C001F0300F8003 +E0300F8001E0600F8001E0C00F8000E1800F00007F000600003E00251D7E9C2B>I<0003 +F800000FFF00003E078000F803C001F001E003E001F007C001F00F8001F01F8000F81F00 +01F83F0001F83E0001F87E0001F87E0001F87E0001F8FC0003F0FC0003F0FC0003F0FC00 +07E0FC0007C0FC0007C07C000F807C001F003C001E003E003C001E00F8000F83E00003FF +C00000FE00001D1D7D9C22>I<007C01F80000FE07FE00018F0E0F0003079807800307B0 +07800607E007C00607C003C00C0F8003E00C0F8003E00C0F8003E0000F8007E0001F0007 +E0001F0007E0001F0007E0001F0007E0003E000FC0003E000FC0003E000FC0003E001F80 +007C001F80007C001F00007C003E00007C003C0000FC007C0000FE00F80000FE01E00000 +FB87C00001F1FF800001F07C000001F000000001F000000003E000000003E000000003E0 +00000003E000000007C000000007C000000007C000000007C00000000FC0000000FFFC00 +0000FFFC000000232A829C24>I<07C01F000FE07F8018F0E0C0307981E0307B03E0607E +07E0607C07E0C0FC07C0C0F80380C0F8000000F8000001F0000001F0000001F0000001F0 +000003E0000003E0000003E0000003E0000007C0000007C0000007C0000007C000000F80 +00000F8000000F8000000F8000000F000000060000001B1D7E9C20>114 +D<000FE0003FF800781C00E00601C00F03C01F03C01F07C01E07C00C07C00007F00007FF +8003FFE001FFF000FFF8003FFC0003FC00007C00003C38003C7C003CFC003CFC0038F800 
+78E000F06001E03807C01FFF0007FC00181D7C9C21>I<000E00001F00001F00003E0000 +3E00003E00003E00007C00007C00007C00007C0000F8007FFFF8FFFFF800F80001F00001 +F00001F00001F00003E00003E00003E00003E00007C00007C00007C00007C0000F80000F +80000F80000F80101F00301F00301F00601F00601F00C01F01801F03000F0E0007FC0001 +F00015297EA819>I<03E0000E0007F8001F000C3C001F00183C003E00303E003E00303E +003E00603E003E00607C007C00C07C007C00C07C007C0000F8007C0000F800F80001F000 +F80001F000F80001F000F80003E001F00003E001F00003E001F00003E001F02003C003E0 +6007C003E06007C003E06003C003E06003C007E0C003E00FE0C001E01BE18000F071E300 +007FE0FE00001F803C00231D7E9C29>I<003F007C0000FFC1FF0001C1E383800300F603 +C00600F607C00C00FC0FC01800FC0FC01800F80F803000F807003000F800000000F80000 +0001F000000001F000000001F000000001F000000003E000000003E000000003E0000000 +03E001000007C003003807C003007C07C003007E07C00600FC0FC00C00FC0FC00C007819 +C038007070E0F0003FE07FC0000F801F0000221D7E9C28>120 D<01E0000E07F8001F0E +3C001F183C003E303E003E303E003E603E003E607C007CC07C007CC07C007C00F8007C00 +F800F801F000F801F000F801F000F803E001F003E001F003E001F003E001F003C003E007 +C003E007C003E003C003E003C007C003E00FC001E01FC000F077C0007FEF80001F8F8000 +000F8000000F8000001F0000001F001E003E003E003C007E0078007E00F8007C01E00030 +03C0001C0F80000FFE000003F80000202A7E9C23>I E /Fm 76 123 +df<0000FF807E000007FFE1FF80001F807FC3C0003E007F87E0007C00FF07E000F800FE +07E001F000FE07E003F000FC018003E0007C000003E0007C000003E0007C000003E0007C +000003E0007C000003E0007C000003E0007C000003E0007C000003E0007C000003E0007C +0000FFFFFFFFFC00FFFFFFFFFC0003E0007C000003E0007C000003E0007C000003E0007C +000003E0007C000003E0007C000003E0007C000003E0007C000003E0007C000003E0007C +000003E0007C000003E0007C000003E0007C000003E0007C000003E0007C000003E0007C +000003E0007C000003E0007C000003E0007C000003E0007C000003E0007C000003E0007C +000003E0007C000003E0007C000007F000FE00007FFF0FFFF0007FFF0FFFF0002B2F7FAE +29>11 D<0000FF00000007FFE000001F80F000003E003800007C007C0000F800FC0001F0 
+00FC0003F000FC0003E000780003E000300003E000000003E000000003E000000003E000 +000003E000000003E000000003E000000003E0007C00FFFFFFFC00FFFFFFFC0003E000FC +0003E0007C0003E0007C0003E0007C0003E0007C0003E0007C0003E0007C0003E0007C00 +03E0007C0003E0007C0003E0007C0003E0007C0003E0007C0003E0007C0003E0007C0003 +E0007C0003E0007C0003E0007C0003E0007C0003E0007C0003E0007C0003E0007C0003E0 +007C0003E0007C0007F000FE007FFF0FFFE07FFF0FFFE0232F7FAE27>I22 D<3C00F07E01F8FF03FCFF03FCFF83FEFF83FE7F81FE3D80F601 +800601800601800603800E03000C03000C07001C0600180E00381C00703800E07001C060 +018017157EAD23>34 D<3C007E00FF00FF00FF80FF807F803D8001800180018003800300 +0300070006000E001C0038007000600009157AAD14>39 D<00030007000E001C00380070 +00F001E001C003C0078007800F000F001E001E001E003C003C003C003C00780078007800 +78007800F800F800F000F000F000F000F000F000F000F000F000F000F000F800F8007800 +78007800780078003C003C003C003C001E001E001E000F000F000780078003C001C001E0 +00F000700038001C000E0007000310437AB11B>II<000C0000001E0000001E0000001E0000001E +0000001E0000601E0180781E0780FC0C0FC07F0C3F803F8C7F0007CCF80001FFE000007F +8000001E0000007F800001FFE00007CCF8003F8C7F007F0C3F80FC0C0FC0781E0780601E +0180001E0000001E0000001E0000001E0000001E0000000C00001A1D7CB123>I<000003 +800000000003800000000003800000000003800000000003800000000003800000000003 +800000000003800000000003800000000003800000000003800000000003800000000003 +800000000003800000000003800000000003800000000003800000000003800000000003 +800000000003800000000003800000000003800000FFFFFFFFFFFCFFFFFFFFFFFCFFFFFF +FFFFFC000003800000000003800000000003800000000003800000000003800000000003 +800000000003800000000003800000000003800000000003800000000003800000000003 +800000000003800000000003800000000003800000000003800000000003800000000003 +8000000000038000000000038000000000038000000000038000002E2F7CA737>I<3C00 +7E00FF00FF00FF80FF807F803D80018001800180038003000300070006000E001C003800 +7000600009157A8714>II<3C7EFFFFFFFF7E 
+3C08087A8714>I<003FC00000FFF00003E07C0007C03E000F801F000F000F001E000780 +1E0007803E0007C03E0007C07C0003E07C0003E07C0003E07C0003E07C0003E0FC0003F0 +FC0003F0FC0003F0FC0003F0FC0003F0FC0003F0FC0003F0FC0003F0FC0003F0FC0003F0 +FC0003F0FC0003F0FC0003F0FC0003F0FC0003F0FC0003F07C0003E07C0003E07C0003E0 +7E0007E03E0007C03E0007C03E0007C01F000F800F000F000F801F0007C03E0003F0FC00 +00FFF000003FC0001C2D7DAB23>48 D<000C00003C00007C0003FC00FFFC00FC7C00007C +00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C +00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C +00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C0000FE007FFF +FE7FFFFE172C7AAB23>I<007F800001FFF0000780FC000E003F001C001F8038000FC070 +000FC0600007E0F00007E0FC0007F0FE0007F0FE0003F0FE0003F0FE0003F07C0007F000 +0007F0000007F0000007E000000FE000000FC000001FC000001F8000003F0000007E0000 +007C000000F8000001F0000003E0000007C000000F8000001E0000003C00000078000000 +F0003000E0003001C0003003800060070000600E0000E01FFFFFE03FFFFFE07FFFFFC0FF +FFFFC0FFFFFFC01C2C7DAB23>I<003FC00001FFF00007C0FC000E007E001C003F001C00 +1F803F001FC03F001FC03F800FC03F000FC03F000FC00C001FC000001FC000001F800000 +1F8000003F0000003E0000007C000000F8000003F00000FFC00000FFF0000000FC000000 +3F0000001F8000001FC000000FC000000FE000000FE0000007F0000007F0380007F07C00 +07F0FE0007F0FE0007F0FE0007F0FE000FE0F8000FE060000FC070001FC038001F801E00 +3F000780FC0001FFF000007FC0001C2D7DAB23>I<00000E0000000E0000001E0000003E +0000003E0000007E000000FE000000FE000001BE000003BE0000033E0000063E00000E3E +00000C3E0000183E0000383E0000303E0000603E0000E03E0000C03E0001803E0003803E +0003003E0006003E000E003E000C003E0018003E0038003E0030003E0060003E00E0003E +00FFFFFFFCFFFFFFFC00003E0000003E0000003E0000003E0000003E0000003E0000003E +0000003E0000003E0000007F00001FFFFC001FFFFC1E2D7EAC23>I<0C0001800FC01F80 +0FFFFF000FFFFE000FFFFC000FFFF0000FFFC0000C7E00000C0000000C0000000C000000 +0C0000000C0000000C0000000C0000000C0000000C1FC0000C7FF8000DE07C000F801F00 
+0F001F800E000F800C0007C0000007E0000007E0000003E0000003F0000003F0000003F0 +000003F0780003F0FC0003F0FC0003F0FC0003F0FC0003F0F80007E0E00007E0600007C0 +70000FC038000F801C001F000E003E000780F80001FFE000007F80001C2D7DAB23>I<00 +03F800000FFE00003E078000F8018001F007C003E00FC007C00FC00F800FC00F800FC01F +0007801F0000003E0000003E0000007E0000007E0000007C0000007C0FC000FC3FF000FC +F07C00FDC01E00FF800F00FF000F80FF0007C0FE0007E0FE0007E0FE0003E0FC0003F0FC +0003F0FC0003F0FC0003F07C0003F07C0003F07C0003F07E0003F07E0003F03E0003E03E +0007E01E0007E01F0007C00F000F8007801F0003C03E0001E07C00007FF000001FC0001C +2D7DAB23>I<300000003C0000003FFFFFF83FFFFFF83FFFFFF07FFFFFF07FFFFFE07000 +01C06000018060000380C0000700C0000E00C0000C0000001C0000003800000030000000 +70000000E0000001C0000001C00000038000000380000007000000070000000F0000000E +0000001E0000001E0000003E0000003E0000003E0000003C0000007C0000007C0000007C +0000007C000000FC000000FC000000FC000000FC000000FC000000FC000000FC000000FC +000000FC0000007800001D2E7CAC23>I<001FC00000FFF00003E07C0007801E000F000F +001E0007801E0007803C0003C03C0003C03C0003C03C0003C03E0003C03E0007C03F0007 +801FC00F801FE00F001FF81E000FFC3C0007FFF80003FFE00000FFE000003FF80000FFFC +0003C7FF000783FF801F00FFC01E003FC03C001FE07C0007E0780003F0F80003F0F00001 +F0F00000F0F00000F0F00000F0F00000F0F80000E0780001E07C0001C03C0003C01E0007 +800F800F0007E03C0001FFF000003FC0001C2D7DAB23>I<003F800000FFF00003E07800 +07C03E000F801F001F000F003E000F803E0007807E0007C07C0007C0FC0007E0FC0003E0 +FC0003E0FC0003E0FC0003F0FC0003F0FC0003F0FC0003F0FC0003F07C0007F07E0007F0 +7E0007F03E000FF01F000FF00F001FF007803BF003E0F3F000FFC3F0003F03E0000003E0 +000007E0000007E0000007C0000007C000000FC01E000F803F000F003F001F003F003E00 +3F003C003E0078001C00F0000E03E00007FF800001FE00001C2D7DAB23>I<3C7EFFFFFF +FF7E3C000000000000000000000000003C7EFFFFFFFF7E3C081D7A9C14>I61 D<000001800000000003C00000000003C00000000003C00000000007 +E00000000007E0000000000FF0000000000FF0000000000FF0000000001BF80000000019 
+F80000000019F80000000030FC0000000030FC0000000070FE00000000607E0000000060 +7E00000000C03F00000000C03F00000000C03F00000001801F80000001801F8000000380 +1FC0000003000FC0000003000FC00000060007E00000060007E00000060007E000000C00 +03F000000C0003F000001FFFFFF800001FFFFFF80000180001F80000300000FC00003000 +00FC0000300000FC00006000007E00006000007E0000E000007F0000C000003F0000C000 +003F0001C000001F8003C000001F8007C000001FC00FF000003FE0FFFC0003FFFFFFFC00 +03FFFF302F7EAE35>65 DI< +00001FF000C00000FFFE01C00003F00F83C0000F8001E3C0003F000077C0007C00003FC0 +01F800001FC003F000000FC007E0000007C007E0000007C00FC0000003C01FC0000003C0 +1F80000001C03F80000001C03F00000001C07F00000000C07F00000000C07F00000000C0 +FE0000000000FE0000000000FE0000000000FE0000000000FE0000000000FE0000000000 +FE0000000000FE0000000000FE0000000000FE0000000000FE00000000007F0000000000 +7F00000000C07F00000000C03F00000000C03F80000000C01F80000001C01FC000000180 +0FC00000018007E00000038007E00000070003F00000060001F800000E00007C00001C00 +003F00007800000F8001E0000003F00FC0000000FFFE000000001FF000002A2F7CAD33> +IIII<00001FF000 +C00000FFFE01C00003F00F83C0000F8001E3C0003F000077C0007C00003FC001F800001F +C003F000000FC007E0000007C007E0000007C00FC0000003C01FC0000003C01F80000001 +C03F80000001C03F00000001C07F00000000C07F00000000C07F00000000C0FE00000000 +00FE0000000000FE0000000000FE0000000000FE0000000000FE0000000000FE00000000 +00FE0000000000FE0000000000FE00001FFFFEFE00001FFFFE7F0000001FE07F0000000F +C07F0000000FC03F0000000FC03F8000000FC01F8000000FC01FC000000FC00FC000000F +C007E000000FC007E000000FC003F000000FC001F800001FC0007C00001FC0003F00003F +C0000F8000F3C00003F007C1C00000FFFF00C000001FF800002F2F7CAD37>III<007FFFF0007FFFF00000FF0000007E0000 +007E0000007E0000007E0000007E0000007E0000007E0000007E0000007E0000007E0000 +007E0000007E0000007E0000007E0000007E0000007E0000007E0000007E0000007E0000 +007E0000007E0000007E0000007E0000007E0000007E0000007E0000007E0000007E0000 +007E0000007E0000007E0038007E00FE007E00FE007E00FE007E00FE00FE00FE00FC0078 
+00FC007001F8003803F0001E07E00007FF800001FC00001C2E7DAC24>IIIII<00003FF000 +000001FFFE00000007E01F8000001F8007E000003E0001F00000FC0000FC0001F800007E +0003F000003F0007E000001F8007C000000F800FC000000FC01F80000007E01F80000007 +E03F00000003F03F00000003F07F00000003F87F00000003F87E00000001F87E00000001 +F8FE00000001FCFE00000001FCFE00000001FCFE00000001FCFE00000001FCFE00000001 +FCFE00000001FCFE00000001FCFE00000001FCFE00000001FC7F00000003F87F00000003 +F87F00000003F83F00000003F03F80000007F01F80000007E01F80000007E00FC000000F +C00FE000001FC007E000001F8003F000003F0001F800007E0000FC0000FC00007E0001F8 +00001F8007E0000007E01F80000001FFFE000000003FF000002E2F7CAD37>II<00003FF000000001FFFE00000007E0 +1F8000001F8007E000003E0001F00000FC0000FC0001F800007E0003F000003F0007E000 +001F8007C000000F800FC000000FC01F80000007E01F80000007E03F80000007F03F0000 +0003F07F00000003F87F00000003F87F00000003F87E00000001F8FE00000001FCFE0000 +0001FCFE00000001FCFE00000001FCFE00000001FCFE00000001FCFE00000001FCFE0000 +0001FCFE00000001FCFE00000001FC7E00000001F87F00000003F87F00000003F83F0000 +0003F03F00000003F01F80000007E01F80000007E00FC007800FC00FC01FE00FC007E038 +701F8003F070183F0001F8601C7E0000FC600CFC00007E600FF800001FF00FE0000007F8 +1F80000001FFFF000C00003FF7000C00000007800C00000007801C00000003C01C000000 +03E03C00000003F07800000003FFF800000001FFF800000001FFF000000001FFF0000000 +00FFE0000000007FC0000000001F002E3B7CAD37>II<003F803001FFF07007C07C700F000EF01E0007 +F03C0003F0780001F0780000F0700000F0F0000070F0000070F0000070F0000030F80000 +30F8000030FC0000007E0000007F0000003FE000003FFE00001FFFE0000FFFFC0007FFFF +0001FFFF80003FFFE00003FFE000003FF0000007F8000001F8000000F8000000FC000000 +7CC000007CC000003CC000003CC000003CE000003CE000003CE0000078F0000078F80000 +70FC0000F0FE0001E0F78003C0E3F00F00E07FFE00C00FF0001E2F7CAD27>I<7FFFFFFF +FFF87FFFFFFFFFF87F000FC003F87C000FC000F870000FC0003870000FC0003860000FC0 +001860000FC00018E0000FC0001CE0000FC0001CC0000FC0000CC0000FC0000CC0000FC0 
+000CC0000FC0000CC0000FC0000C00000FC0000000000FC0000000000FC0000000000FC0 +000000000FC0000000000FC0000000000FC0000000000FC0000000000FC0000000000FC0 +000000000FC0000000000FC0000000000FC0000000000FC0000000000FC0000000000FC0 +000000000FC0000000000FC0000000000FC0000000000FC0000000000FC0000000000FC0 +000000000FC0000000000FC0000000000FC0000000000FC0000000000FC0000000001FE0 +0000001FFFFFE000001FFFFFE0002E2D7EAC33>II<7FFFE003FFF87FFFE003FFF803FF0000FF8001FE +00007E0000FE00007C00007F00007800003F00007000003F8000E000001FC000C000000F +E0018000000FE00380000007F00700000003F80600000001F80C00000001FC1C00000000 +FE38000000007F30000000007F60000000003FE0000000001FC0000000000FC000000000 +0FE00000000007F00000000007F8000000000DF8000000001DFC0000000038FE00000000 +307F00000000607F00000000E03F80000001C01FC0000001800FC0000003000FE0000007 +0007F000000E0003F800000C0003F800001C0001FC0000380000FE00007000007E000070 +00007F0000F000003F8003F000003FC00FF800007FE0FFFE0003FFFFFFFE0003FFFF302D +7EAC35>88 DI<3FFFFFFFC03FFFFFFFC03FF0001FC03F80003F803E00007F003C00007F003800 +00FE00380001FC00700001FC00700003F800700007F000600007F00060000FE00060001F +C00060001FC00000003F800000007F000000007F00000000FE00000001FC00000001FC00 +000003F800000003F000000007F00000000FE00000000FE00000001FC00000003F800000 +003F800060007F00006000FE00006000FE00006001FC00006003F80000E003F80000E007 +F00000E00FE00000C00FE00001C01FC00001C03F800003C03F800007C07F00001FC0FE00 +00FFC0FFFFFFFFC0FFFFFFFFC0232D7CAC2B>I<03000C07001C0E00381C00703800E030 +00C07001C0600180600180E00380C00300C00300C00300DE0378FF03FCFF83FEFF83FE7F +81FE7F81FE3F00FC1E0078171577AD23>92 D<030007000E001C00380030007000600060 +00E000C000C000C000DE00FF00FF80FF807F807F803F001E0009157BAD14>96 +D<00FF000007FFC0000F01F0001C00F8003F007C003F003E003F003E003F003F001E001F +0000001F0000001F0000001F0000001F000007FF00007FFF0001FE1F0007F01F001FC01F +003F801F007F001F007E001F00FE001F06FC001F06FC001F06FC001F06FC003F06FE003F 
+067E007F067F00EF8C1F83C7FC0FFF03F801FC01E01F207D9E23>I<07C0000000FFC000 +0000FFC00000000FC000000007C000000007C000000007C000000007C000000007C00000 +0007C000000007C000000007C000000007C000000007C000000007C000000007C0000000 +07C0FE000007C7FF800007CF03E00007DC01F00007F8007C0007F0007E0007E0003E0007 +C0001F0007C0001F8007C0001F8007C0000F8007C0000FC007C0000FC007C0000FC007C0 +000FC007C0000FC007C0000FC007C0000FC007C0000FC007C0000FC007C0001F8007C000 +1F8007C0001F0007C0003F0007E0003E0007F0007C0007B000F80007BC01F000070E07E0 +000607FF80000001FC0000222F7EAD27>I<001FE000007FFC0001F01E0003E0070007C0 +1F800F801F801F001F803F001F803E000F007E0000007E0000007C000000FC000000FC00 +0000FC000000FC000000FC000000FC000000FC000000FC000000FC0000007E0000007E00 +00007E0000C03F0000C01F0001C01F8001800FC0038007E0070001F03E00007FF800001F +C0001A207E9E1F>I<000000F80000001FF80000001FF800000001F800000000F8000000 +00F800000000F800000000F800000000F800000000F800000000F800000000F800000000 +F800000000F800000000F800000000F800000FE0F800007FF8F80001F81EF80003E007F8 +0007C003F8000F8001F8001F0001F8003F0000F8003E0000F8007E0000F8007E0000F800 +FC0000F800FC0000F800FC0000F800FC0000F800FC0000F800FC0000F800FC0000F800FC +0000F800FC0000F8007C0000F8007E0000F8007E0000F8003E0001F8001F0001F8001F80 +03F8000F8007F80003E00EFC0001F03CFFC0007FF0FFC0001FC0F800222F7EAD27>I<00 +1F800000FFF00003E0780007C03E000F801E001F001F001F000F803E000F807E0007807E +0007C07C0007C0FC0007C0FC0007C0FC0007C0FFFFFFC0FFFFFFC0FC000000FC000000FC +000000FC000000FC0000007E0000007E0000003E0000C03F0000C01F0001C00F8003800F +C0030003E00F0001F03C00007FF800001FC0001A207E9E1F>I<0003F0000FFC003E1E00 +7C3F00F83F01F03F01F03F03E00C03E00003E00003E00003E00003E00003E00003E00003 +E00003E00003E000FFFFE0FFFFE003E00003E00003E00003E00003E00003E00003E00003 +E00003E00003E00003E00003E00003E00003E00003E00003E00003E00003E00003E00003 +E00003E00003E00003E00003E00007F0007FFF807FFF80182F7FAE16>I<003F00F800FF +C3FE03E1FF1E07807C1E0F807C0C1F003E001F003E003E001F003E001F003E001F003E00 
+1F003E001F003E001F003E001F001F003E001F003E000F807C00078078000FE1F0000CFF +C0001C3F00001C0000001C0000001C0000001E0000001F0000000FFFF8000FFFFF0007FF +FFC00FFFFFF01E0007F83C0000F87800007CF800007CF000003CF000003CF000003CF000 +003CF800007C7C0000F83E0001F01F0003E007E01F8001FFFE00003FF0001F2D7E9D23> +I<07C0000000FFC0000000FFC00000000FC000000007C000000007C000000007C0000000 +07C000000007C000000007C000000007C000000007C000000007C000000007C000000007 +C000000007C000000007C0FE000007C3FF800007C703E00007DE01F00007F801F00007F0 +00F80007F000F80007E000F80007E000F80007C000F80007C000F80007C000F80007C000 +F80007C000F80007C000F80007C000F80007C000F80007C000F80007C000F80007C000F8 +0007C000F80007C000F80007C000F80007C000F80007C000F80007C000F80007C000F800 +0FE001FC00FFFE1FFFC0FFFE1FFFC0222E7EAD27>I<07800FC01FE01FE01FE01FE00FC0 +07800000000000000000000000000000000007C0FFC0FFC00FC007C007C007C007C007C0 +07C007C007C007C007C007C007C007C007C007C007C007C007C007C007C007C007C007C0 +0FE0FFFCFFFC0E2E7EAD14>I<000F00001F80003FC0003FC0003FC0003FC0001F80000F +000000000000000000000000000000000000000000000000000007C000FFC000FFC0000F +C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007 +C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007 +C00007C00007C00007C00007C00007C00007C00007C03007C07807C0FC0F80FC0F80FC0F +00F81F00783E003FF80007E000123C83AD16>I<07C0000000FFC0000000FFC00000000F +C000000007C000000007C000000007C000000007C000000007C000000007C000000007C0 +00000007C000000007C000000007C000000007C000000007C000000007C000000007C01F +FE0007C01FFE0007C00FF00007C007C00007C007800007C00E000007C01C000007C03800 +0007C070000007C0E0000007C3C0000007C7C0000007CFE0000007DFF0000007F9F00000 +07F0F8000007E0FC000007C07E000007C03E000007C01F000007C01F800007C00FC00007 +C007C00007C003E00007C003F00007C001F8000FE003FC00FFFE07FF80FFFE07FF80212E +7EAD25>I<07C0FFC0FFC00FC007C007C007C007C007C007C007C007C007C007C007C007 +C007C007C007C007C007C007C007C007C007C007C007C007C007C007C007C007C007C007 
+C007C007C007C007C007C007C007C007C007C00FE0FFFEFFFE0F2E7EAD14>I<07C07F00 +07F000FFC3FFC03FFC00FFC783F0783F000FCE01F8E01F8007DC00F9C00F8007F800FF80 +0FC007F0007F0007C007E0007E0007C007E0007E0007C007C0007C0007C007C0007C0007 +C007C0007C0007C007C0007C0007C007C0007C0007C007C0007C0007C007C0007C0007C0 +07C0007C0007C007C0007C0007C007C0007C0007C007C0007C0007C007C0007C0007C007 +C0007C0007C007C0007C0007C007C0007C0007C007C0007C0007C007C0007C0007C007C0 +007C0007C00FE000FE000FE0FFFE0FFFE0FFFEFFFE0FFFE0FFFE371E7E9D3C>I<07C0FE +0000FFC3FF8000FFC703E0000FDE01F00007F801F00007F000F80007F000F80007E000F8 +0007E000F80007C000F80007C000F80007C000F80007C000F80007C000F80007C000F800 +07C000F80007C000F80007C000F80007C000F80007C000F80007C000F80007C000F80007 +C000F80007C000F80007C000F80007C000F80007C000F8000FE001FC00FFFE1FFFC0FFFE +1FFFC0221E7E9D27>I<001FE000007FF80001F03E0003C00F00078007800F0003C01F00 +03E03E0001F03E0001F07C0000F87C0000F87C0000F8FC0000FCFC0000FCFC0000FCFC00 +00FCFC0000FCFC0000FCFC0000FCFC0000FCFC0000FC7C0000F87C0000F83E0001F03E00 +01F01F0003E01F0003E00F8007C007C00F8001F03E00007FF800001FE0001E207E9E23> +I<07C0FE0000FFC7FF8000FFCF03E0000FDC01F00007F800FC0007F0007E0007E0003E00 +07C0003F0007C0001F8007C0001F8007C0001F8007C0000FC007C0000FC007C0000FC007 +C0000FC007C0000FC007C0000FC007C0000FC007C0000FC007C0001FC007C0001F8007C0 +001F8007C0003F0007C0003F0007E0007E0007F0007C0007F000F80007FC01F00007CE07 +E00007C7FF800007C1FC000007C000000007C000000007C000000007C000000007C00000 +0007C000000007C000000007C000000007C00000000FE0000000FFFE000000FFFE000000 +222B7E9D27>I<000FE01800007FF8380001F81C380003E00E780007C00778000F8003F8 +001F8001F8003F0001F8003F0001F8007E0000F8007E0000F800FE0000F800FC0000F800 +FC0000F800FC0000F800FC0000F800FC0000F800FC0000F800FC0000F800FC0000F8007E +0000F8007E0000F8007E0000F8003F0001F8001F0001F8001F8003F8000FC007F80003E0 +0EF80001F03CF800007FF0F800001FC0F800000000F800000000F800000000F800000000 +F800000000F800000000F800000000F800000000F800000000F800000001FC0000001FFF 
+C000001FFFC0222B7E9D25>I<0781F8FF87FEFF8E3F0F9C3F07B83F07B03F07F01E07E0 +0007E00007E00007E00007C00007C00007C00007C00007C00007C00007C00007C00007C0 +0007C00007C00007C00007C00007C00007C00007C0000FE000FFFF00FFFF00181E7E9D1C +>I<01FE1807FFB81E01F83C00F8780078F00038F00038F00018F00018F80018FC0018FF +00007FF0003FFF001FFFC00FFFF001FFF8001FFC0001FCC0007EC0003EC0003EE0001EE0 +001EF0001EF0001EF8003CF8003CFC0078FF01F0E3FFC0C0FF0017207E9E1C>I<006000 +00600000600000600000E00000E00000E00001E00003E00003E00007E0001FE000FFFFF0 +FFFFF003E00003E00003E00003E00003E00003E00003E00003E00003E00003E00003E000 +03E00003E00003E00003E00003E01803E01803E01803E01803E01803E01803E01803E038 +01F03001F07000F860003FE0000F80152A7FA81B>I<07C000F800FFC01FF800FFC01FF8 +000FC001F80007C000F80007C000F80007C000F80007C000F80007C000F80007C000F800 +07C000F80007C000F80007C000F80007C000F80007C000F80007C000F80007C000F80007 +C000F80007C000F80007C000F80007C000F80007C000F80007C001F80007C001F80007C0 +01F80007C003F80003E007F80003E00EFC0001F81CFFC0007FF8FFC0001FE0F800221F7E +9D27>IIIII<3FFFFF803FFFFF803F003F803C003F0038007E003000FC007001FC006001F8006003 +F0006007E000600FE000000FC000001F8000003F0000007F0000007E000000FC018001F8 +018003F8018003F0018007E003800FC003801FC003001F8003003F0007007E000F00FE00 +3F00FFFFFF00FFFFFF00191D7E9C1F>I E /Fn 56 123 df<000380000FC0001FC0003F +8000FF0000FE0001F80003F80007F0000FE0000FC0001F80001F80003F00003F00003E00 +007E00007E00007C00007C0000FC0000FC0000F80000F80000F80000F80000F80000F800 +00F80000F80000FC0000FC00007C00007C00007E00007E00003E00003F00003F00001F80 +001F80000FC0000FE00007F00003F80001F80000FE0000FF00003F80001FC0000FC00003 +80123476AD23>40 D<700000FC0000FE00007F00003FC0001FC00007E00007F00003F800 +01FC0000FC00007E00007E00003F00003F00001F00001F80001F80000F80000F80000FC0 +000FC00007C00007C00007C00007C00007C00007C00007C00007C0000FC0000FC0000F80 +000F80001F80001F80001F00003F00003F00007E00007E0000FC0001FC0003F80007F000 +07E0001FC0003FC0007F0000FE0000FC000070000012347AAD23>I<1F003F807FC07FE0 
+7FE07FE07FE03FE01FE007E007E00FC01FC07F80FF00FE00FC0070000B12748823>44 +D<7FFFFF80FFFFFFC0FFFFFFC0FFFFFFC0FFFFFFC07FFFFF801A067C9623>I<3E007F00 +FF80FF80FF80FF80FF807F003E000909738823>I<001800003C00007C00007C0000FC00 +01FC0003FC0007FC007FFC00FFFC00FFFC00FF7C007C7C00007C00007C00007C00007C00 +007C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C00007C00 +007C00007C00007C00007C00007C00007C00007C00007C007FFFFC7FFFFE7FFFFE7FFFFE +7FFFFC172A7AA923>49 D<007FC00003FFF00007FFFC001FFFFE003FFFFF807F80FF807E +003FC0FE000FE0FC0007E0FE0003F0FE0003F0FE0003F0FE0001F07C0001F0380001F000 +0001F0000003F0000003F0000007E0000007E000000FC000001FC000003F8000003F0000 +00FE000001FE000003F8000007F000000FE000003FC000007F800000FE000001FC000003 +F800000FF000E01FE001F03F8001F07FFFFFF0FFFFFFF0FFFFFFF0FFFFFFF07FFFFFE01C +2A7DA923>I<007FC00001FFF00007FFFC000FFFFE001FFFFF003FE07F803F801FC03F80 +0FC03F8007C03F8007C03F8007C00E0007C000000FC000000FC000001F8000003F800001 +FF00007FFE0000FFFC0000FFF80000FFFC00007FFF0000007F8000001FC000000FE00000 +07E0000003F0000003F0000001F0000001F07C0001F0FE0001F0FE0003F0FE0003F0FE00 +07E0FC000FE0FE001FC07FC07FC03FFFFF801FFFFF000FFFFC0003FFF800007FC0001C2B +7DA923>I<1FFFFF003FFFFF803FFFFF803FFFFF803FFFFF003E0000003E0000003E0000 +003E0000003E0000003E0000003E0000003E0000003E0000003E0000003E1FE0003EFFF8 +003FFFFE003FFFFF003FFFFF803FE03FC03F800FC03F0007E01E0007E0000003F0000003 +F0000001F0000001F0380001F0FE0001F0FE0001F0FE0003F0FE0003F0FE0007E0FC000F +E07E001FC07FC0FF803FFFFF001FFFFE0007FFFC0003FFF000007F80001C2A7DA823>53 +D<0007F000001FFC00007FFE0001FFFF0003FFFF8007FC1FC00FE007C00FC0FFE01F83FF +E03F07FFE03E0FFFE07E1FFFF07C1F8FF07C3F07F0783E03F0F87C01F0F87C01F0F07C01 +F0F07800F0F07800F0F07800F0F07800F0F07800F0F07C01F0F87C01F0F87C01F0783E03 +E07C3F07E07C1F8FC07E1FFFC03E0FFF803F07FF001F83FE000FC0F9E00FE007F007FC0F +F003FFFFF001FFFFC0007FFF80001FFE000007F8001C297DA823>64 +D<000FC000000FC000001FE000001FE000001FE000001FE000003CF000003CF000003CF0 
+00003CF000003CF000007CF800007CF800007CF8000078780000F87C0000F87C0000F87C +0000F87C0000F87C0001F03E0001F03E0001F03E0001F03E0003F03F0003E01F0003E01F +0003FFFF0003FFFF0007FFFF8007FFFF8007FFFF8007C00F800FC00FC00F8007C00F8007 +C00F8007C07FF03FF8FFF03FFCFFF03FFCFFF03FFC7FF03FF81E2A7EA923>II<000FE0E0003FF9F000FFFDF001 +FFFFF003FFFFF007FC3FF00FF00FF01FC007F01FC007F03F8003F03F0003F07F0001F07E +0001F07E0001F07C0000E0FC000000FC000000F8000000F8000000F8000000F8000000F8 +000000F8000000F8000000F8000000F8000000FC000000FC0000007C0000007E0000E07E +0001F07F0001F03F0001F03F8003F01FC003F01FC007E00FF00FE007FC1FC003FFFF8001 +FFFF0000FFFE00003FF800000FE0001C2B7DA923>I<7FFFF000FFFFFC00FFFFFE00FFFF +FF007FFFFF800F803FC00F801FC00F8007E00F8007E00F8003F00F8001F00F8001F80F80 +00F80F8000F80F8000F80F8000FC0F80007C0F80007C0F80007C0F80007C0F80007C0F80 +007C0F80007C0F80007C0F80007C0F80007C0F8000F80F8000F80F8000F80F8001F80F80 +01F00F8003F00F8003F00F8007E00F800FC00F803FC07FFFFF80FFFFFF00FFFFFE00FFFF +FC007FFFF0001E297FA823>II<7FFFFFFCFFFFFFFEFFFFFFFEFFFFFFFE7FFFFFFE07C0003E07C0003E07C0003E +07C0003E07C0003E07C0001C07C0000007C0000007C0000007C00E0007C01F0007C01F00 +07C01F0007FFFF0007FFFF0007FFFF0007FFFF0007FFFF0007C01F0007C01F0007C01F00 +07C00E0007C0000007C0000007C0000007C0000007C0000007C0000007C0000007C00000 +07C000007FFE0000FFFF0000FFFF0000FFFF00007FFE00001F297FA823>I<001FC1C000 +7FF3E000FFFFE003FFFFE007FFFFE007F83FE00FE01FE01FC00FE01F800FE03F8007E03F +0007E07E0003E07E0003E07E0003E07C0001C0FC000000FC000000F8000000F8000000F8 +000000F8000000F8000000F8000000F8007FF8F8007FFCF8007FFCFC007FFCFC007FF87C +0003E07E0003E07E0007E07E0007E03F0007E03F800FE01F800FE01FC01FE00FE01FE007 +F87FE007FFFFE003FFFFE000FFFBE0007FF1C0001FC0001E2B7EA923>I<7FF00FFEFFF8 +1FFFFFF81FFFFFF81FFF7FF00FFE0F8001F00F8001F00F8001F00F8001F00F8001F00F80 +01F00F8001F00F8001F00F8001F00F8001F00F8001F00F8001F00FFFFFF00FFFFFF00FFF +FFF00FFFFFF00FFFFFF00F8001F00F8001F00F8001F00F8001F00F8001F00F8001F00F80 
+01F00F8001F00F8001F00F8001F00F8001F00F8001F00F8001F00F8001F07FF00FFEFFF8 +1FFFFFF81FFFFFF81FFF7FF00FFE20297FA823>II<7FE01FF8FFF03FF8FFF03FF8FFF03FF87FE01FF80F000F800F001F000F +003F000F007E000F007C000F00F8000F01F8000F03F0000F03E0000F07C0000F0FC0000F +1F80000F1F80000F3F80000F7FC0000FFFE0000FFBE0000FF1F0000FF1F0000FE0F8000F +C0F8000F807C000F807C000F003E000F003E000F001F000F001F000F000F800F000FC00F +0007C00F0003E07FE007FCFFF00FFCFFF00FFCFFF00FFC7FE007FC1E297EA823>75 +D<7FFE0000FFFF0000FFFF0000FFFF00007FFE000007C0000007C0000007C0000007C000 +0007C0000007C0000007C0000007C0000007C0000007C0000007C0000007C0000007C000 +0007C0000007C0000007C0000007C0000007C0000007C0000007C0000007C0000007C000 +0007C0000007C0000007C0007C07C0007C07C0007C07C0007C07C0007C07C0007C07C000 +7C7FFFFFFCFFFFFFFCFFFFFFFCFFFFFFFC7FFFFFF81E297EA823>II<7FC01FF8FFC03FFCFFE03FFCFFE03FFC7F +F01FF80F7003C00F7003C00F7803C00F3803C00F3803C00F3C03C00F3C03C00F1C03C00F +1E03C00F1E03C00F0E03C00F0F03C00F0F03C00F0F03C00F0783C00F0783C00F0783C00F +03C3C00F03C3C00F03C3C00F01C3C00F01E3C00F01E3C00F00E3C00F00F3C00F00F3C00F +0073C00F0073C00F007BC00F003BC00F003BC07FE03FC0FFF01FC0FFF01FC0FFF00FC07F +E00F801E297EA823>I<03FFF0000FFFFC001FFFFE003FFFFF003FFFFF007F807F807E00 +1F807C000F807C000F80FC000FC0F80007C0F80007C0F80007C0F80007C0F80007C0F800 +07C0F80007C0F80007C0F80007C0F80007C0F80007C0F80007C0F80007C0F80007C0F800 +07C0F80007C0F80007C0F80007C0F80007C0F80007C0F80007C0F80007C0FC000FC0FC00 +0FC07C000F807C000F807E001F807F807F803FFFFF003FFFFF001FFFFE000FFFFC0003FF +F0001A2B7CA923>II< +7FFFC00000FFFFF80000FFFFFC0000FFFFFF00007FFFFF00000F807F80000F801FC0000F +800FC0000F8007E0000F8003E0000F8003E0000F8003E0000F8003E0000F8003E0000F80 +07E0000F800FC0000F801FC0000F807F80000FFFFF00000FFFFF00000FFFFC00000FFFFE +00000FFFFF00000F807F00000F803F80000F801F80000F800F80000F800F80000F800F80 +000F800F80000F800F80000F800F80000F800F87000F800F8F800F800F8F800F800FCF80 +7FF00FDF80FFF807FF80FFF807FF00FFF803FF007FF001FE00000000F800212A7FA823> +82 
D<00FF838003FFE3C007FFFFC01FFFFFC03FFFFFC07FC0FFC07F003FC0FE001FC0FC +000FC0FC000FC0F8000FC0F80007C0F80007C0FC000380FC0000007E0000007F0000003F +F000001FFF00000FFFF00007FFFC0001FFFE00001FFF800001FFC000001FC000000FE000 +0007E0000003F0000003F0700001F0F80001F0F80001F0F80001F0FC0003F0FC0003F0FE +0007E0FF000FE0FFE03FC0FFFFFFC0FFFFFF00FFFFFE00F1FFFC00703FE0001C2B7DA923 +>I<7FFFFFF8FFFFFFFCFFFFFFFCFFFFFFFCFFFFFFFCF807C07CF807C07CF807C07CF807 +C07CF807C07C7007C0380007C0000007C0000007C0000007C0000007C0000007C0000007 +C0000007C0000007C0000007C0000007C0000007C0000007C0000007C0000007C0000007 +C0000007C0000007C0000007C0000007C0000007C0000007C0000007C0000007C0000007 +C00000FFFE0001FFFF0001FFFF0001FFFF0000FFFE001E297EA823>II<7FF00FFEFFF00FFFFFF00FFF +FFF00FFF7FF00FFE0F8001F00F8001F007C003E007C003E007C003E007C003E003E007C0 +03E007C003E007C003E007C003F00FC001F00F8001F00F8001F00F8001F00F8000F81F00 +00F81F0000F81F0000F81F00007C3E00007C3E00007C3E00007C3E00003C3C00003E7C00 +003E7C00003E7C00001E7800001E7800001E7800001E7800001FF800000FF000000FF000 +000FF0000007E0000007E000202A7FA823>II<7FF07FF07FF8FFF07FF8FFF07FF8FFF07FF07FF007E0 +3F0003E03E0003F07E0001F07C0001F8FC0000F8F80000FDF800007DF000007FF000003F +E000003FE000001FC000001FC000000F8000000F8000000FC000001FC000001FE000003F +E000003FF000007DF000007CF80000F8F80000F87C0001F07C0001F03E0003F03E0003E0 +1F0007E01F0007C00F800FC00F807FE03FF8FFF03FFCFFF03FFCFFF03FFC7FE03FF81E29 +7EA823>I<7FF00FFEFFF81FFFFFF81FFFFFF81FFF7FF00FFE07C003E007E007E003E007 +C003F007C001F00FC001F80F8000F81F8000FC1F00007C1F00007C3E00003E3E00003E7E +00001F7C00001F7C00000FF800000FF8000007F0000007F0000007E0000003E0000003E0 +000003E0000003E0000003E0000003E0000003E0000003E0000003E0000003E0000003E0 +000003E000001FFC00003FFE00003FFE00003FFE00001FFC0020297FA823>I<03FF8000 +0FFFE0001FFFF8003FFFFC003FFFFE003F80FF003F803F801F001F8000001F8000000F80 +0000FF80007FFF8003FFFF800FFFFF801FFFFF807FF00F807F000F80FC000F80FC000F80 
+F8000F80F8001F80FC001F80FE003F807F81FF807FFFFFFC3FFFFFFE1FFFFFFE07FFC7FE +01FE01FC1F1D7D9C23>97 DI<003FF00000FFFC0003FFFF0007FFFF000FFFFF001FE07F003F807F007F003E007E +0000007E000000FC000000FC000000F8000000F8000000F8000000F8000000F8000000FC +000000FC0000007E0007007F000F807F001F803FC01F801FF07F000FFFFF0007FFFE0003 +FFFC0000FFF000003FC000191D7B9C23>I<0000FF800000FFC00000FFC00000FFC00000 +FFC0000007C0000007C0000007C0000007C0000007C0000007C0000007C0007F87C001FF +E7C003FFF7C00FFFFFC01FFFFFC01FE0FFC03F803FC07F001FC07E000FC07E000FC0FC00 +07C0FC0007C0F80007C0F80007C0F80007C0F80007C0F80007C0FC000FC0FC000FC07E00 +0FC07E001FC07F003FC03F807FC03FE0FFC01FFFFFFE0FFFFFFE03FFF7FE01FFC7FE007F +03FE1F297EA823>I<003FC00000FFF80003FFFC0007FFFF000FFFFF801FF07F803F801F +C03F000FC07E0007C07E0007E0FC0007E0FFFFFFE0FFFFFFE0FFFFFFE0FFFFFFE0FFFFFF +C0F8000000FC000000FC0000007E0001C07F0003E03F8007E03FE007E01FF81FC00FFFFF +C007FFFF8001FFFE00007FFC00001FE0001B1D7D9C23>I<00007F000001FFC00007FFE0 +000FFFE0001FFFE0003FCFE0003F0FE0003E07C0003E0000003E0000003E0000003E0000 +7FFFFF80FFFFFFC0FFFFFFC0FFFFFFC07FFFFF80003E0000003E0000003E0000003E0000 +003E0000003E0000003E0000003E0000003E0000003E0000003E0000003E0000003E0000 +003E0000003E0000003E0000003E0000003E0000003E00003FFFFE007FFFFF007FFFFF00 +7FFFFF003FFFFE001B297EA823>I104 D<00380000FE0000FE0000FE0000FE0000FE0000380000000000000000 +00000000000000000000007FFC00FFFE00FFFE00FFFE007FFE00003E00003E00003E0000 +3E00003E00003E00003E00003E00003E00003E00003E00003E00003E00003E00003E0000 +3E00003E00003E00003E007FFFFEFFFFFFFFFFFFFFFFFF7FFFFE182A7AA923>I<7F8000 +00FFC00000FFC00000FFC000007FC0000003C0000003C0000003C0000003C0000003C000 +0003C0000003C0000003C0FFFC03C1FFFC03C1FFFC03C1FFFC03C0FFFC03C03F0003C07E +0003C0FC0003C1F80003C3F00003C7E00003CFC00003DFE00003FFF00003FFF00003FCF8 +0003F8FC0003F07E0003E03E0003C01F0003C01F8003C00FC003C007C003C003E07FFE0F +FFFFFF1FFFFFFF1FFFFFFF1FFF7FFE0FFF20297FA823>107 D<7FFE0000FFFF0000FFFF 
+0000FFFF00007FFF0000001F0000001F0000001F0000001F0000001F0000001F0000001F +0000001F0000001F0000001F0000001F0000001F0000001F0000001F0000001F0000001F +0000001F0000001F0000001F0000001F0000001F0000001F0000001F0000001F0000001F +0000001F0000001F0000001F0000001F0000001F0000001F00007FFFFFC0FFFFFFE0FFFF +FFE0FFFFFFE07FFFFFC01B297CA823>I<7E1F01F000FF7FC7FC00FFFFEFFE00FFFFFFFE +007FFFFFFF000FE1FE1F000FC1FC1F000FC0FC0F000F80F80F000F80F80F000F00F00F00 +0F00F00F000F00F00F000F00F00F000F00F00F000F00F00F000F00F00F000F00F00F000F +00F00F000F00F00F000F00F00F000F00F00F000F00F00F000F00F00F007FE3FE3FE0FFF3 +FF3FF0FFF3FF3FF0FFF3FF3FF07FE3FE3FE0241D819C23>II<003F000001FFE00003FFF00007FFF8000FFFFC001FC0FE003F807F003E001F00 +7E001F807C000F80FC000FC0F80007C0F80007C0F80007C0F80007C0F80007C0F80007C0 +FC000FC0FC000FC07C000F807E001F803F003F003F807F001FE1FE000FFFFC0007FFF800 +03FFF00001FFE000003F00001A1D7C9C23>II<7FF00FE0FFF87FF8FFF8FFFCFFFB +FFFC7FFFFFFC00FFF8FC00FFC07800FF800000FF000000FE000000FE000000FC000000FC +000000FC000000F8000000F8000000F8000000F8000000F8000000F8000000F8000000F8 +000000F8000000F800007FFFFC00FFFFFE00FFFFFE00FFFFFE007FFFFC001E1D7E9C23> +114 D<01FF9C0FFFFE1FFFFE7FFFFE7FFFFEFF00FEFC007EF8003EF8003EFC001CFE0000 +7FF0003FFF800FFFE007FFF8007FFC0001FE00007F70003FF8001FFC001FFC003FFE003F +FF80FFFFFFFEFFFFFCFFFFF8FFFFF070FF80181D7B9C23>I<0070000000F8000000F800 +0000F8000000F8000000F8000000F8000000F800007FFFFF80FFFFFFC0FFFFFFC0FFFFFF +C07FFFFF8000F8000000F8000000F8000000F8000000F8000000F8000000F8000000F800 +0000F8000000F8000000F8000000F8000000F801C000F803E000F803E000F803E000FC07 +E000FC0FE000FE1FC0007FFFC0007FFF80003FFE00000FFC000007F0001B257EA423>I< +FF807FC0FFC07FE0FFC07FE0FFC07FE0FFC07FE007C003E007C003E007C003E007C003E0 +07C003E007C003E007C003E007C003E007C003E007C003E007C003E007C003E007C003E0 +07C003E007C003E007C003E007C007E007C00FE007E03FE007FFFFFF03FFFFFF03FFFFFF +00FFF3FF003FC1FF201D7F9C23>II<7FF00FFEFFF8 +1FFFFFF81FFFFFF81FFF7FF00FFE1F0000F80F0000F00F0000F00F0000F00F0000F00F00 
+00F00F8000F00783E1E00787F1E00787F1E00787F1E0078FF9E0038F79C003CF79C003CF +79C003DF7BC003DF7BC003DE3BC001DE3B8001FE3F8001FE3F8001FC1F8001FC1F8000F8 +0F00201D7F9C23>I<7FF07FF0FFF8FFF8FFF8FFF8FFF8FFF87FF07FF003E03E0001F03C +0000F87C000078F800007CF000003FE000001FE000000FC000000F800000078000000FC0 +00001FE000003DE000003CF0000078780000F07C0001F03C0001E01E0003C01F007FF03F +F8FFF87FFCFFF87FFCFFF87FFC7FF03FF81E1D7E9C23>II<3FFFFFF07FFFFFF87F +FFFFF87FFFFFF87FFFFFF87C000FF07C001FE07C003FC038007F800000FF000001FC0000 +03F8000007F000000FE000001FC000003F8000007F000000FE000001FC000007F800700F +F000F81FE000F83FC000F87F8000F8FFFFFFF8FFFFFFF8FFFFFFF8FFFFFFF87FFFFFF01D +1D7E9C23>I E /Fo 10 119 df<00001E000000003E00000000FE00000007FE0000003F +FE0000FFFFFE0000FFFFFE0000FFFFFE0000FFCFFE0000000FFE0000000FFE0000000FFE +0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE00 +00000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000 +000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE000000 +0FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000F +FE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE +0000000FFE0000000FFE0000000FFE00007FFFFFFF807FFFFFFF807FFFFFFF807FFFFFFF +80213779B630>49 D<000FFC0000007FFFC00001FFFFF00007FFFFFC000FF03FFF001F80 +0FFF803F0003FFC07F0003FFC07FC001FFE0FFE000FFF0FFF000FFF0FFF000FFF0FFF000 +7FF8FFF0007FF8FFF0007FF8FFF0007FF87FE0007FF83FC0007FF80F00007FF80000007F +F8000000FFF0000000FFF0000000FFE0000001FFE0000001FFC0000003FF80000003FF00 +000007FE00000007FC0000000FF80000001FF00000003FE00000007F800000007F000000 +00FE00000001F800000003F000780007E00078000FC00078001F800078003E0000F8007C +0000F000F80000F001F00001F003FFFFFFF003FFFFFFF007FFFFFFF00FFFFFFFF01FFFFF +FFF03FFFFFFFF07FFFFFFFE0FFFFFFFFE0FFFFFFFFE0FFFFFFFFE0FFFFFFFFE025377BB6 +30>I<0003FF0000001FFFF000007FFFFC0001FC07FF0003F003FF8007E001FFC007C000 +FFE00FF000FFE00FFC00FFF01FFC00FFF01FFE00FFF01FFE00FFF01FFE00FFF01FFE00FF 
+F01FFE00FFF00FFC00FFE00FFC00FFE003F001FFE0000001FFC0000003FF80000003FF00 +000007FE0000000FFC0000003FF000000FFFC000000FFF0000000FFFF000000007FE0000 +0001FF80000000FFC0000000FFE00000007FF00000007FF80000007FFC0000003FFC0000 +003FFC0000003FFE1FC0003FFE3FE0003FFE7FF0003FFEFFF8003FFEFFF8003FFEFFF800 +3FFEFFF8003FFEFFF8003FFCFFF8003FFCFFF0007FF87FF0007FF87FC000FFF03F8000FF +E01FC001FFC00FFC07FF8007FFFFFF0001FFFFFC00007FFFF0000007FF000027387CB630 +>I<00000003E00000000000000007F00000000000000007F0000000000000000FF80000 +00000000000FF8000000000000000FF8000000000000001FFC000000000000001FFC0000 +00000000003FFE000000000000003FFE000000000000003FFE000000000000007FFF0000 +00000000007FFF00000000000000FFFF80000000000000FFFF80000000000000FFFF8000 +0000000001FFFFC0000000000001F3FFC0000000000003F3FFE0000000000003E1FFE000 +0000000003E1FFE0000000000007E1FFF0000000000007C0FFF000000000000FC0FFF800 +000000000F807FF800000000000F807FF800000000001F807FFC00000000001F003FFC00 +000000003F003FFE00000000003E001FFE00000000003E001FFE00000000007E001FFF00 +000000007C000FFF0000000000FC000FFF8000000000F80007FF8000000000F80007FF80 +00000001F80007FFC000000001F00003FFC000000003FFFFFFFFE000000003FFFFFFFFE0 +00000003FFFFFFFFE000000007FFFFFFFFF000000007C00000FFF00000000FC00000FFF8 +0000000F8000007FF80000000F8000007FF80000001F8000007FFC0000001F0000003FFC +0000003F0000003FFE0000003E0000001FFE0000003E0000001FFE0000007E0000001FFF +0000007C0000000FFF000000FE0000000FFF8000FFFFF80007FFFFFF80FFFFF80007FFFF +FF80FFFFF80007FFFFFF80FFFFF80007FFFFFF80413A7DB948>65 +DI76 D<0007FF000E00003FFFE01E0000FFFFF83E0003FFFFFE7E0007FC01FFFE +000FF0001FFE001FE0000FFE003FC00003FE003F800001FE007F800000FE007F000000FE +007F0000007E00FF0000007E00FF0000003E00FF0000003E00FF8000003E00FF8000001E +00FFC000001E00FFE000001E00FFF000000000FFFC000000007FFFE00000007FFFFE0000 +007FFFFFF000003FFFFFFE00003FFFFFFF80001FFFFFFFC0000FFFFFFFF00007FFFFFFF8 +0003FFFFFFFC0000FFFFFFFE00003FFFFFFE00000FFFFFFF000001FFFFFF0000000FFFFF 
+800000007FFF800000000FFFC000000003FFC000000001FFC000000000FFC0F00000007F +C0F00000007FC0F00000007FC0F00000003FC0F00000003FC0F80000003FC0F80000003F +80FC0000003F80FC0000007F80FE0000007F00FF0000007F00FF800000FE00FFE00001FC +00FFF80003FC00FFFF801FF800FCFFFFFFE000F83FFFFFC000F007FFFE0000E0007FF000 +002A3B7BB935>83 D<0003FF8000001FFFF000007FFFFC0001FF83FE0003FE007F8007FC +003F800FF8003FC01FF8001FE01FF0001FE03FF0000FF03FF0000FF07FE0000FF07FE000 +0FF87FE00007F8FFE00007F8FFE00007F8FFFFFFFFF8FFFFFFFFF8FFFFFFFFF8FFE00000 +00FFE0000000FFE0000000FFE00000007FE00000007FE00000007FE00000003FF0000000 +3FF00000781FF00000780FF80000F80FFC0000F007FC0003F001FF000FE000FFC07FC000 +7FFFFF00000FFFFC000001FFE00025257DA42C>101 D<00FF00FFFF00FFFF00FFFF00FF +FF0007FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003 +FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003 +FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003 +FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003FF0003 +FF0003FF00FFFFFCFFFFFCFFFFFCFFFFFC163A7DB91B>108 D118 D E /Fp 3 52 df<0001C0000003C0000007C000001FC000007FC00007FFC000 +FFFFC000FF9FC000F81FC000001FC000001FC000001FC000001FC000001FC000001FC000 +001FC000001FC000001FC000001FC000001FC000001FC000001FC000001FC000001FC000 +001FC000001FC000001FC000001FC000001FC000001FC000001FC000001FC000001FC000 +001FC000001FC000001FC000001FC000001FC000001FC000001FC000001FC000001FC000 +001FC000001FC000001FC000001FC000001FC000001FC000001FC000001FC000001FC000 +001FC000001FC000001FC000001FC000001FC000001FC000007FF000FFFFFFF8FFFFFFF8 +FFFFFFF81D3D78BC2D>49 D<0007FC0000003FFF800000FFFFE00003F01FF80007C007FC +000F0001FE001E0000FF001C0000FF803C00007FC07800007FC07800003FE07000003FE0 +FF00003FE0FF80001FF0FFC0001FF0FFC0001FF0FFC0001FF0FFC0001FF0FFC0001FF07F +80001FF03F00001FF00C00001FF00000001FE00000003FE00000003FE00000003FC00000 +007FC00000007F80000000FF80000000FF00000001FE00000001FC00000003F800000007 
+F000000007E00000000FC00000001F800000003F000000007E000000007C00000000F800 +000001F000000003E000000007C00000000F800000001F000070003E000070003C000070 +007800007000F00000E001E00000E003C00000E007800000E00F000001E01FFFFFFFE01F +FFFFFFE03FFFFFFFE07FFFFFFFC0FFFFFFFFC0FFFFFFFFC0FFFFFFFFC0243D7CBC2D>I< +0007FC0000003FFF800000F80FE00001E003F800078001FC000F0001FE000E0000FF001E +0000FF801F80007F803FC0007FC03FE0007FC03FE0007FC03FF0007FC03FE0007FC03FE0 +007FC01FE0007FC00FC0007FC00000007F80000000FF80000000FF00000000FF00000001 +FE00000001FE00000003FC00000003F800000007E00000000FC00000003F0000001FFC00 +00001FFF800000000FE000000007F800000003FC00000001FE00000000FF00000000FF80 +0000007FC00000007FC00000007FE00000003FE00000003FE00000003FF00000003FF00C +00003FF03F00003FF07F80003FF0FFC0003FF0FFC0003FF0FFC0003FF0FFC0003FE0FFC0 +003FE0FF80007FE07F00007FC07800007FC0780000FF803C0000FF801E0001FF000F0003 +FE0007C007FC0003F80FF00000FFFFE000003FFF80000007F80000243F7CBC2D>I +E end +%%EndProlog +%%BeginSetup +%%Feature: *Resolution 600dpi +TeXDict begin +%%PaperSize: Letter + @landscape +%%EndSetup +%%Page: 1 1 +1 0 bop 2488 4276 a Fp(1)p eop +%%Page: 2 2 +2 1 bop -389 -355 a Fo(Lev)m(el)32 b(1)f(BLAS)281 -276 +y Fn(dim)36 b(scalar)h(vector)108 b(vector)f(scalars)496 +b(5-element)38 b(array)2430 b Fm(pre\014xes)-389 -197 +y Fn(SUBROUTINE)38 b(xROTG)e(\()1342 b(A,)35 b(B,)h(C,)g(S)g(\))805 +b Fm(Generate)26 b(plane)e(rotation)1451 b(S,)23 b(D)-389 +-119 y Fn(SUBROUTINE)38 b(xROTMG\()1060 b(D1,)36 b(D2,)h(A,)e(B,)283 +b(PARAM)37 b(\))523 b Fm(Generate)26 b(mo)r(di\014ed)d(plane)h +(rotation)1163 b(S,)23 b(D)-389 -40 y Fn(SUBROUTINE)38 +b(xROT)71 b(\()36 b(N,)318 b(X,)36 b(INCX,)h(Y,)f(INCY,)530 +b(C,)36 b(S)g(\))805 b Fm(Apply)24 b(plane)g(rotation)1542 +b(S,)23 b(D)-389 39 y Fn(SUBROUTINE)38 b(xROTM)e(\()g(N,)318 +b(X,)36 b(INCX,)h(Y,)f(INCY,)777 b(PARAM)37 b(\))523 +b Fm(Apply)24 b(mo)r(di\014ed)f(plane)h(rotation)1254 +b(S,)23 b(D)-389 118 y Fn(SUBROUTINE)38 b(xSWAP)e(\()g(N,)318 +b(X,)36 
b(INCX,)h(Y,)f(INCY)g(\))1511 b Fl(x)20 b Fk($)f +Fl(y)2003 b Fm(S,)23 b(D,)g(C,)g(Z)-389 197 y Fn(SUBROUTINE)38 +b(xSCAL)e(\()g(N,)71 b(ALPHA,)37 b(X,)f(INCX)g(\))1829 +b Fl(x)20 b Fk( )f Fl(\013x)1953 b Fm(S,)23 b(D,)g(C,)g(Z,)h(CS,)f(ZD) +-389 276 y Fn(SUBROUTINE)38 b(xCOPY)e(\()g(N,)318 b(X,)36 +b(INCX,)h(Y,)f(INCY)g(\))1511 b Fl(y)22 b Fk( )d Fl(x)2001 +b Fm(S,)23 b(D,)g(C,)g(Z)-389 355 y Fn(SUBROUTINE)38 +b(xAXPY)e(\()g(N,)71 b(ALPHA,)37 b(X,)f(INCX,)h(Y,)f(INCY)g(\))1511 +b Fl(y)22 b Fk( )d Fl(\013x)d Fm(+)g Fl(y)1834 b Fm(S,)23 +b(D,)g(C,)g(Z)-389 436 y Fn(FUNCTION)108 b(xDOT)71 b(\()36 +b(N,)318 b(X,)36 b(INCX,)h(Y,)f(INCY)g(\))1511 b Fl(dot)21 +b Fk( )e Fl(x)3061 412 y Fj(T)3109 436 y Fl(y)1859 b +Fm(S,)23 b(D,)g(DS)-389 517 y Fn(FUNCTION)108 b(xDOTU)36 +b(\()g(N,)318 b(X,)36 b(INCX,)h(Y,)f(INCY)g(\))1511 b +Fl(dot)21 b Fk( )e Fl(x)3061 494 y Fj(T)3109 517 y Fl(y)1859 +b Fm(C,)23 b(Z)-389 598 y Fn(FUNCTION)108 b(xDOTC)36 +b(\()g(N,)318 b(X,)36 b(INCX,)h(Y,)f(INCY)g(\))1511 b +Fl(dot)21 b Fk( )e Fl(x)3061 575 y Fj(H)3118 598 y Fl(y)1850 +b Fm(C,)23 b(Z)-389 679 y Fn(FUNCTION)108 b(xxDOT)36 +b(\()g(N,)318 b(X,)36 b(INCX,)h(Y,)f(INCY)g(\))1511 b +Fl(dot)21 b Fk( )e Fl(\013)d Fm(+)g Fl(x)3193 656 y Fj(T)3241 +679 y Fl(y)1727 b Fm(SDS)-389 758 y Fn(FUNCTION)108 b(xNRM2)36 +b(\()g(N,)318 b(X,)36 b(INCX)g(\))1829 b Fl(nr)r(m)p +Fm(2)20 b Fk( )f(jj)p Fl(x)p Fk(jj)3219 767 y Fi(2)5003 +758 y Fm(S,)k(D,)g(SC,)h(DZ)-389 837 y Fn(FUNCTION)108 +b(xASUM)36 b(\()g(N,)318 b(X,)36 b(INCX)g(\))1829 b Fl(asum)20 +b Fk( )f(jj)p Fl(r)r(e)p Fm(\()p Fl(x)p Fm(\))p Fk(jj)3339 +846 y Fi(1)3388 837 y Fm(+)d Fk(jj)p Fl(im)p Fm(\()p +Fl(x)p Fm(\))p Fk(jj)3719 846 y Fi(1)5003 837 y Fm(S,)23 +b(D,)g(SC,)h(DZ)-389 916 y Fn(FUNCTION)108 b(IxAMAX\()37 +b(N,)318 b(X,)36 b(INCX)g(\))1829 b Fl(amax)21 b Fk( )e +Fm(1)3137 892 y Fj(st)3195 916 y Fl(k)i Fk(3)e(j)p Fl(r)r(e)p +Fm(\()p Fl(x)3473 928 y Fj(k)3511 916 y Fm(\))p Fk(j)c +Fm(+)h Fk(j)p Fl(im)p Fm(\()p Fl(x)3817 928 y Fj(k)3855 +916 y 
Fm(\))p Fk(j)1101 b Fm(S,)23 b(D,)g(C,)g(Z)3262 +995 y(=)d Fl(max)p Fm(\()p Fk(j)p Fl(r)r(e)p Fm(\()p +Fl(x)3657 1005 y Fj(i)3683 995 y Fm(\))p Fk(j)c Fm(+)g +Fk(j)p Fl(im)p Fm(\()p Fl(x)3990 1005 y Fj(i)4016 995 +y Fm(\))p Fk(j)p Fm(\))-389 1076 y Fo(Lev)m(el)32 b(2)f(BLAS)-107 +1155 y Fn(options)425 b(dim)107 b(b-width)37 b(scalar)g(matrix)73 +b(vector)107 b(scalar)37 b(vector)-389 1236 y(xGEMV)f(\()283 +b(TRANS,)213 b(M,)36 b(N,)318 b(ALPHA,)37 b(A,)f(LDA,)h(X,)e(INCX,)i +(BETA,)72 b(Y,)36 b(INCY)g(\))347 b Fl(y)22 b Fk( )d +Fl(\013Ax)d Fm(+)g Fl(\014)s(y)r(;)11 b(y)22 b Fk( )d +Fl(\013A)3543 1213 y Fj(T)3592 1236 y Fl(x)c Fm(+)h Fl(\014)s(y)r(;)c +(y)22 b Fk( )d Fl(\013A)4075 1213 y Fj(H)4133 1236 y +Fl(x)c Fm(+)h Fl(\014)s(y)r(;)11 b(A)16 b Fk(\000)f Fl(m)h +Fk(\002)g Fl(n)302 b Fm(S,)23 b(D,)g(C,)g(Z)-389 1317 +y Fn(xGBMV)36 b(\()283 b(TRANS,)213 b(M,)36 b(N,)g(KL,)g(KU,)g(ALPHA,)h +(A,)f(LDA,)h(X,)e(INCX,)i(BETA,)72 b(Y,)36 b(INCY)g(\))347 +b Fl(y)22 b Fk( )d Fl(\013Ax)d Fm(+)g Fl(\014)s(y)r(;)11 +b(y)22 b Fk( )d Fl(\013A)3543 1294 y Fj(T)3592 1317 y +Fl(x)c Fm(+)h Fl(\014)s(y)r(;)c(y)22 b Fk( )d Fl(\013A)4075 +1294 y Fj(H)4133 1317 y Fl(x)c Fm(+)h Fl(\014)s(y)r(;)11 +b(A)16 b Fk(\000)f Fl(m)h Fk(\002)g Fl(n)302 b Fm(S,)23 +b(D,)g(C,)g(Z)-389 1396 y Fn(xHEMV)36 b(\()g(UPLO,)601 +b(N,)318 b(ALPHA,)37 b(A,)f(LDA,)h(X,)e(INCX,)i(BETA,)72 +b(Y,)36 b(INCY)g(\))347 b Fl(y)22 b Fk( )d Fl(\013Ax)d +Fm(+)g Fl(\014)s(y)1738 b Fm(C,)23 b(Z)-389 1475 y Fn(xHBMV)36 +b(\()g(UPLO,)601 b(N,)36 b(K,)212 b(ALPHA,)37 b(A,)f(LDA,)h(X,)e(INCX,) +i(BETA,)72 b(Y,)36 b(INCY)g(\))347 b Fl(y)22 b Fk( )d +Fl(\013Ax)d Fm(+)g Fl(\014)s(y)1738 b Fm(C,)23 b(Z)-389 +1554 y Fn(xHPMV)36 b(\()g(UPLO,)601 b(N,)318 b(ALPHA,)37 +b(AP,)178 b(X,)35 b(INCX,)i(BETA,)72 b(Y,)36 b(INCY)g(\))347 +b Fl(y)22 b Fk( )d Fl(\013Ax)d Fm(+)g Fl(\014)s(y)1738 +b Fm(C,)23 b(Z)-389 1633 y Fn(xSYMV)36 b(\()g(UPLO,)601 +b(N,)318 b(ALPHA,)37 b(A,)f(LDA,)h(X,)e(INCX,)i(BETA,)72 +b(Y,)36 b(INCY)g(\))347 b Fl(y)22 b 
Fk( )d Fl(\013Ax)d +Fm(+)g Fl(\014)s(y)1738 b Fm(S,)23 b(D)-389 1712 y Fn(xSBMV)36 +b(\()g(UPLO,)601 b(N,)36 b(K,)212 b(ALPHA,)37 b(A,)f(LDA,)h(X,)e(INCX,) +i(BETA,)72 b(Y,)36 b(INCY)g(\))347 b Fl(y)22 b Fk( )d +Fl(\013Ax)d Fm(+)g Fl(\014)s(y)1738 b Fm(S,)23 b(D)-389 +1790 y Fn(xSPMV)36 b(\()g(UPLO,)601 b(N,)318 b(ALPHA,)37 +b(AP,)178 b(X,)35 b(INCX,)i(BETA,)72 b(Y,)36 b(INCY)g(\))347 +b Fl(y)22 b Fk( )d Fl(\013Ax)d Fm(+)g Fl(\014)s(y)1738 +b Fm(S,)23 b(D)-389 1872 y Fn(xTRMV)36 b(\()g(UPLO,)h(TRANS,)g(DIAG,) +142 b(N,)565 b(A,)36 b(LDA,)h(X,)e(INCX)i(\))911 b Fl(x)20 +b Fk( )f Fl(Ax;)11 b(x)19 b Fk( )g Fl(A)3291 1848 y Fj(T)3340 +1872 y Fl(x;)11 b(x)19 b Fk( )g Fl(A)3613 1848 y Fj(H)3671 +1872 y Fl(x)1292 b Fm(S,)23 b(D,)g(C,)g(Z)-389 1953 y +Fn(xTBMV)36 b(\()g(UPLO,)h(TRANS,)g(DIAG,)142 b(N,)36 +b(K,)459 b(A,)36 b(LDA,)h(X,)e(INCX)i(\))911 b Fl(x)20 +b Fk( )f Fl(Ax;)11 b(x)19 b Fk( )g Fl(A)3291 1929 y Fj(T)3340 +1953 y Fl(x;)11 b(x)19 b Fk( )g Fl(A)3613 1929 y Fj(H)3671 +1953 y Fl(x)1292 b Fm(S,)23 b(D,)g(C,)g(Z)-389 2034 y +Fn(xTPMV)36 b(\()g(UPLO,)h(TRANS,)g(DIAG,)142 b(N,)565 +b(AP,)178 b(X,)35 b(INCX)i(\))911 b Fl(x)20 b Fk( )f +Fl(Ax;)11 b(x)19 b Fk( )g Fl(A)3291 2010 y Fj(T)3340 +2034 y Fl(x;)11 b(x)19 b Fk( )g Fl(A)3613 2010 y Fj(H)3671 +2034 y Fl(x)1292 b Fm(S,)23 b(D,)g(C,)g(Z)-389 2115 y +Fn(xTRSV)36 b(\()g(UPLO,)h(TRANS,)g(DIAG,)142 b(N,)565 +b(A,)36 b(LDA,)h(X,)e(INCX)i(\))911 b Fl(x)20 b Fk( )f +Fl(A)3018 2092 y Fh(\000)p Fi(1)3100 2115 y Fl(x;)11 +b(x)20 b Fk( )f Fl(A)3374 2092 y Fh(\000)p Fj(T)3470 +2115 y Fl(x;)11 b(x)20 b Fk( )f Fl(A)3744 2092 y Fh(\000)p +Fj(H)3849 2115 y Fl(x)1114 b Fm(S,)23 b(D,)g(C,)g(Z)-389 +2196 y Fn(xTBSV)36 b(\()g(UPLO,)h(TRANS,)g(DIAG,)142 +b(N,)36 b(K,)459 b(A,)36 b(LDA,)h(X,)e(INCX)i(\))911 +b Fl(x)20 b Fk( )f Fl(A)3018 2173 y Fh(\000)p Fi(1)3100 +2196 y Fl(x;)11 b(x)20 b Fk( )f Fl(A)3374 2173 y Fh(\000)p +Fj(T)3470 2196 y Fl(x;)11 b(x)20 b Fk( )f Fl(A)3744 2173 +y Fh(\000)p Fj(H)3849 2196 y Fl(x)1114 b Fm(S,)23 
b(D,)g(C,)g(Z)-389 +2277 y Fn(xTPSV)36 b(\()g(UPLO,)h(TRANS,)g(DIAG,)142 +b(N,)565 b(AP,)178 b(X,)35 b(INCX)i(\))911 b Fl(x)20 +b Fk( )f Fl(A)3018 2254 y Fh(\000)p Fi(1)3100 2277 y +Fl(x;)11 b(x)20 b Fk( )f Fl(A)3374 2254 y Fh(\000)p Fj(T)3470 +2277 y Fl(x;)11 b(x)20 b Fk( )f Fl(A)3744 2254 y Fh(\000)p +Fj(H)3849 2277 y Fl(x)1114 b Fm(S,)23 b(D,)g(C,)g(Z)-107 +2356 y Fn(options)425 b(dim)107 b(scalar)37 b(vector)108 +b(vector)f(matrix)-389 2437 y(xGER)71 b(\()706 b(M,)36 +b(N,)g(ALPHA,)h(X,)f(INCX,)h(Y,)e(INCY,)i(A,)f(LDA)g(\))876 +b Fl(A)20 b Fk( )f Fl(\013xy)3100 2414 y Fj(T)3165 2437 +y Fm(+)c Fl(A;)c(A)16 b Fk(\000)f Fl(m)i Fk(\002)e Fl(n)1353 +b Fm(S,)23 b(D)-389 2518 y Fn(xGERU)36 b(\()706 b(M,)36 +b(N,)g(ALPHA,)h(X,)f(INCX,)h(Y,)e(INCY,)i(A,)f(LDA)g(\))876 +b Fl(A)20 b Fk( )f Fl(\013xy)3100 2495 y Fj(T)3165 2518 +y Fm(+)c Fl(A;)c(A)16 b Fk(\000)f Fl(m)i Fk(\002)e Fl(n)1353 +b Fm(C,)23 b(Z)-389 2600 y Fn(xGERC)36 b(\()706 b(M,)36 +b(N,)g(ALPHA,)h(X,)f(INCX,)h(Y,)e(INCY,)i(A,)f(LDA)g(\))876 +b Fl(A)20 b Fk( )f Fl(\013xy)3100 2576 y Fj(H)3174 2600 +y Fm(+)c Fl(A;)c(A)16 b Fk(\000)g Fl(m)g Fk(\002)f Fl(n)1344 +b Fm(C,)23 b(Z)-389 2681 y Fn(xHER)71 b(\()36 b(UPLO,)601 +b(N,)36 b(ALPHA,)h(X,)f(INCX,)354 b(A,)36 b(LDA)g(\))876 +b Fl(A)20 b Fk( )f Fl(\013xx)3103 2657 y Fj(H)3176 2681 +y Fm(+)d Fl(A)1703 b Fm(C,)23 b(Z)-389 2762 y Fn(xHPR)71 +b(\()36 b(UPLO,)601 b(N,)36 b(ALPHA,)h(X,)f(INCX,)354 +b(AP)36 b(\))1017 b Fl(A)20 b Fk( )f Fl(\013xx)3103 2738 +y Fj(H)3176 2762 y Fm(+)d Fl(A)1703 b Fm(C,)23 b(Z)-389 +2843 y Fn(xHER2)36 b(\()g(UPLO,)601 b(N,)36 b(ALPHA,)h(X,)f(INCX,)h(Y,) +e(INCY,)i(A,)f(LDA)g(\))876 b Fl(A)20 b Fk( )f Fl(\013xy)3100 +2820 y Fj(H)3174 2843 y Fm(+)c Fl(y)r Fm(\()p Fl(\013x)p +Fm(\))3420 2820 y Fj(H)3495 2843 y Fm(+)g Fl(A)1385 b +Fm(C,)23 b(Z)-389 2924 y Fn(xHPR2)36 b(\()g(UPLO,)601 +b(N,)36 b(ALPHA,)h(X,)f(INCX,)h(Y,)e(INCY,)i(AP)f(\))1017 +b Fl(A)20 b Fk( )f Fl(\013xy)3100 2901 y Fj(H)3174 2924 +y Fm(+)c Fl(y)r Fm(\()p Fl(\013x)p Fm(\))3420 
2901 y +Fj(H)3495 2924 y Fm(+)g Fl(A)1385 b Fm(C,)23 b(Z)-389 +3005 y Fn(xSYR)71 b(\()36 b(UPLO,)601 b(N,)36 b(ALPHA,)h(X,)f(INCX,)354 +b(A,)36 b(LDA)g(\))876 b Fl(A)20 b Fk( )f Fl(\013xx)3103 +2982 y Fj(T)3167 3005 y Fm(+)d Fl(A)1712 b Fm(S,)23 b(D)-389 +3086 y Fn(xSPR)71 b(\()36 b(UPLO,)601 b(N,)36 b(ALPHA,)h(X,)f(INCX,)354 +b(AP)36 b(\))1017 b Fl(A)20 b Fk( )f Fl(\013xx)3103 3063 +y Fj(T)3167 3086 y Fm(+)d Fl(A)1712 b Fm(S,)23 b(D)-389 +3168 y Fn(xSYR2)36 b(\()g(UPLO,)601 b(N,)36 b(ALPHA,)h(X,)f(INCX,)h(Y,) +e(INCY,)i(A,)f(LDA)g(\))876 b Fl(A)20 b Fk( )f Fl(\013xy)3100 +3144 y Fj(T)3165 3168 y Fm(+)c Fl(\013y)r(x)3357 3144 +y Fj(T)3422 3168 y Fm(+)g Fl(A)1458 b Fm(S,)23 b(D)-389 +3249 y Fn(xSPR2)36 b(\()g(UPLO,)601 b(N,)36 b(ALPHA,)h(X,)f(INCX,)h(Y,) +e(INCY,)i(AP)f(\))1017 b Fl(A)20 b Fk( )f Fl(\013xy)3100 +3225 y Fj(T)3165 3249 y Fm(+)c Fl(\013y)r(x)3357 3225 +y Fj(T)3422 3249 y Fm(+)g Fl(A)1458 b Fm(S,)23 b(D)-389 +3409 y Fo(Lev)m(el)32 b(3)f(BLAS)-107 3488 y Fn(options)919 +b(dim)213 b(scalar)37 b(matrix)72 b(matrix)g(scalar)37 +b(matrix)-389 3569 y(xGEMM)f(\()459 b(TRANSA,)38 b(TRANSB,)213 +b(M,)36 b(N,)g(K,)g(ALPHA,)h(A,)f(LDA,)g(B,)g(LDB,)g(BETA,)72 +b(C,)36 b(LDC)g(\))100 b Fl(C)25 b Fk( )19 b Fl(\013op)p +Fm(\()p Fl(A)p Fm(\))p Fl(op)p Fm(\()p Fl(B)s Fm(\))e(+)f +Fl(\014)s(C)q(;)c(op)p Fm(\()p Fl(X)5 b Fm(\))20 b(=)f +Fl(X)q(;)12 b(X)4031 3545 y Fj(T)4080 3569 y Fl(;)f(X)4174 +3545 y Fj(H)4232 3569 y Fl(;)h(C)20 b Fk(\000)c Fl(m)g +Fk(\002)f Fl(n)407 b Fm(S,)23 b(D,)g(C,)g(Z)-389 3650 +y Fn(xSYMM)36 b(\()g(SIDE,)h(UPLO,)777 b(M,)36 b(N,)142 +b(ALPHA,)37 b(A,)f(LDA,)g(B,)g(LDB,)g(BETA,)72 b(C,)36 +b(LDC)g(\))100 b Fl(C)25 b Fk( )19 b Fl(\013AB)g Fm(+)d +Fl(\014)s(C)q(;)11 b(C)25 b Fk( )19 b Fl(\013B)s(A)e +Fm(+)e Fl(\014)s(C)q(;)d(C)20 b Fk(\000)c Fl(m)g Fk(\002)g +Fl(n;)11 b(A)19 b Fm(=)h Fl(A)4442 3627 y Fj(T)5003 3650 +y Fm(S,)j(D,)g(C,)g(Z)-389 3731 y Fn(xHEMM)36 b(\()g(SIDE,)h(UPLO,)777 +b(M,)36 b(N,)142 b(ALPHA,)37 
b(A,)f(LDA,)g(B,)g(LDB,)g(BETA,)72 +b(C,)36 b(LDC)g(\))100 b Fl(C)25 b Fk( )19 b Fl(\013AB)g +Fm(+)d Fl(\014)s(C)q(;)11 b(C)25 b Fk( )19 b Fl(\013B)s(A)e +Fm(+)e Fl(\014)s(C)q(;)d(C)20 b Fk(\000)c Fl(m)g Fk(\002)g +Fl(n;)11 b(A)19 b Fm(=)h Fl(A)4442 3708 y Fj(H)5003 3731 +y Fm(C,)j(Z)-389 3812 y Fn(xSYRK)36 b(\()248 b(UPLO,)36 +b(TRANS,)637 b(N,)36 b(K,)g(ALPHA,)h(A,)f(LDA,)318 b(BETA,)72 +b(C,)36 b(LDC)g(\))100 b Fl(C)25 b Fk( )19 b Fl(\013AA)3131 +3789 y Fj(T)3195 3812 y Fm(+)d Fl(\014)s(C)q(;)11 b(C)25 +b Fk( )19 b Fl(\013A)3654 3789 y Fj(T)3703 3812 y Fl(A)d +Fm(+)f Fl(\014)s(C)q(;)d(C)20 b Fk(\000)c Fl(n)f Fk(\002)h +Fl(n)722 b Fm(S,)23 b(D,)g(C,)g(Z)-389 3893 y Fn(xHERK)36 +b(\()248 b(UPLO,)36 b(TRANS,)637 b(N,)36 b(K,)g(ALPHA,)h(A,)f(LDA,)318 +b(BETA,)72 b(C,)36 b(LDC)g(\))100 b Fl(C)25 b Fk( )19 +b Fl(\013AA)3131 3870 y Fj(H)3205 3893 y Fm(+)c Fl(\014)s(C)q(;)d(C)24 +b Fk( )19 b Fl(\013A)3663 3870 y Fj(H)3721 3893 y Fl(A)d +Fm(+)f Fl(\014)s(C)q(;)d(C)20 b Fk(\000)c Fl(n)f Fk(\002)h +Fl(n)704 b Fm(C,)23 b(Z)-389 3975 y Fn(xSYR2K\()249 b(UPLO,)36 +b(TRANS,)637 b(N,)36 b(K,)g(ALPHA,)h(A,)f(LDA,)g(B,)g(LDB,)g(BETA,)72 +b(C,)36 b(LDC)g(\))100 b Fl(C)25 b Fk( )19 b Fl(\013AB)3134 +3951 y Fj(T)3199 3975 y Fm(+)k(\026)-42 b Fl(\013B)s(A)3424 +3951 y Fj(T)3489 3975 y Fm(+)15 b Fl(\014)s(C)q(;)d(C)24 +b Fk( )19 b Fl(\013A)3947 3951 y Fj(T)3996 3975 y Fl(B)g +Fm(+)k(\026)-42 b Fl(\013B)4240 3951 y Fj(T)4289 3975 +y Fl(A)16 b Fm(+)g Fl(\014)s(C)q(;)11 b(C)21 b Fk(\000)15 +b Fl(n)h Fk(\002)f Fl(n)136 b Fm(S,)23 b(D,)g(C,)g(Z)-389 +4056 y Fn(xHER2K\()249 b(UPLO,)36 b(TRANS,)637 b(N,)36 +b(K,)g(ALPHA,)h(A,)f(LDA,)g(B,)g(LDB,)g(BETA,)72 b(C,)36 +b(LDC)g(\))100 b Fl(C)25 b Fk( )19 b Fl(\013AB)3134 4032 +y Fj(H)3208 4056 y Fm(+)k(\026)-42 b Fl(\013B)s(A)3433 +4032 y Fj(H)3507 4056 y Fm(+)15 b Fl(\014)s(C)q(;)d(C)24 +b Fk( )c Fl(\013A)3966 4032 y Fj(H)4023 4056 y Fl(B)g +Fm(+)i(\026)-42 b Fl(\013)q(B)4268 4032 y Fj(H)4326 4056 +y Fl(A)15 b Fm(+)h Fl(\014)s(C)q(;)11 b(C)21 b 
Fk(\000)16 +b Fl(n)f Fk(\002)h Fl(n)99 b Fm(C,)23 b(Z)-389 4137 y +Fn(xTRMM)36 b(\()g(SIDE,)h(UPLO,)f(TRANSA,)285 b(DIAG,)36 +b(M,)g(N,)142 b(ALPHA,)37 b(A,)f(LDA,)g(B,)g(LDB)g(\))629 +b Fl(B)23 b Fk( )d Fl(\013op)p Fm(\()p Fl(A)p Fm(\))p +Fl(B)s(;)12 b(B)23 b Fk( )c Fl(\013B)s(op)p Fm(\()p Fl(A)p +Fm(\))p Fl(;)13 b(op)p Fm(\()p Fl(A)p Fm(\))20 b(=)f +Fl(A;)12 b(A)4178 4113 y Fj(T)4226 4137 y Fl(;)g(A)4311 +4113 y Fj(H)4368 4137 y Fl(;)f(B)20 b Fk(\000)15 b Fl(m)h +Fk(\002)g Fl(n)269 b Fm(S,)23 b(D,)g(C,)g(Z)-389 4218 +y Fn(xTRSM)36 b(\()g(SIDE,)h(UPLO,)f(TRANSA,)285 b(DIAG,)36 +b(M,)g(N,)142 b(ALPHA,)37 b(A,)f(LDA,)g(B,)g(LDB)g(\))629 +b Fl(B)23 b Fk( )d Fl(\013op)p Fm(\()p Fl(A)3177 4195 +y Fh(\000)p Fi(1)3259 4218 y Fm(\))p Fl(B)s(;)13 b(B)23 +b Fk( )c Fl(\013B)s(op)p Fm(\()p Fl(A)3792 4195 y Fh(\000)p +Fi(1)3875 4218 y Fm(\))p Fl(;)12 b(op)p Fm(\()p Fl(A)p +Fm(\))20 b(=)g Fl(A;)11 b(A)4343 4195 y Fj(T)4391 4218 +y Fl(;)h(A)4476 4195 y Fj(H)4533 4218 y Fl(;)g(B)19 b +Fk(\000)c Fl(m)h Fk(\002)g Fl(n)104 b Fm(S,)23 b(D,)g(C,)g(Z)2488 +4276 y Fp(2)p eop +%%Page: 3 3 +3 2 bop -366 -358 a Fg(Meaning)27 b(of)f(pre\014xes)-316 +-279 y Fm(S)e(-)f(REAL)641 b(C)23 b(-)g(COMPLEX)-316 +-200 y(D)g(-)g(DOUBLE)h(PRECISION)100 b(Z)23 b(-)h(COMPLEX*16)632 +-121 y(\(this)g(ma)n(y)f(not)h(b)r(e)g(supp)r(orted)632 +-42 y(b)n(y)g(all)f(mac)n(hines\))-366 344 y(F)-6 b(or)24 +b(the)g(Lev)n(el)g(2)g(BLAS)g(a)g(set)g(of)f(extended-precision)j +(routines)e(with)-366 423 y(the)h(pre\014xes)f(ES,)f(ED,)g(EC,)g(EZ)h +(ma)n(y)f(also)g(b)r(e)h(a)n(v)l(ailable.)-366 660 y +Fg(Lev)n(el)j(1)g(BLAS)-366 739 y Fm(In)d(addition)h(to)f(the)g(listed) +g(routines)g(there)g(are)g(t)n(w)n(o)g(further)-366 818 +y(extended-precision)i(dot)e(pro)r(duct)h(routines)f(DQDOTI)f(and)h +(DQDOT)-6 b(A.)-366 1054 y Fg(Lev)n(el)27 b(2)g(and)g(Lev)n(el)f(3)h +(BLAS)-366 1133 y Fm(Matrix)c(t)n(yp)r(es:)-316 1212 +y(GE)h(-)f(GEneral)197 b(GB)24 b(-)f(General)h(Band)-316 +1291 y(SY)g(-)f(SYmmetric)115 b(SB)24 
b(-)f(Sym.)30 b(Band)193 +b(SP)24 b(-)f(Sum.)30 b(P)n(ac)n(k)n(ed)-316 1370 y(HE)23 +b(-)g(HErmitian)126 b(HB)23 b(-)g(Herm.)30 b(Band)144 +b(HP)24 b(-)f(Herm.)29 b(P)n(ac)n(k)n(ed)-316 1449 y(TR)23 +b(-)g(TRiangular)100 b(TB)23 b(-)g(T)-6 b(riang.)31 b(Band)115 +b(TP)24 b(-)f(T)-6 b(riang.)31 b(P)n(ac)n(k)n(ed)-366 +1685 y(Lev)n(el)24 b(2)g(and)g(Lev)n(el)h(3)e(BLAS)h(Options)-366 +1764 y(Dumm)n(y)e(options)i(argumen)n(ts)g(are)f(declared)i(as)e(CHARA) +n(CTER*1)-366 1843 y(and)h(ma)n(y)f(b)r(e)h(passed)h(as)e(c)n(haracter) +i(strings.)-316 1922 y(TRANx)99 b(=)24 b(`)p Fg(N)p Fm(o)g(transp)r +(ose',)f(`)p Fg(T)p Fm(ransp)r(ose',)194 2003 y(`)p Fg(C)p +Fm(onjugate)i(transp)r(ose')f(\()p Fl(X)q(;)12 b(X)1078 +1980 y Fj(T)1127 2003 y Fl(;)f(X)1221 1980 y Fj(H)1279 +2003 y Fm(\))-316 2082 y(UPLO)145 b(=)24 b(`)p Fg(U)p +Fm(pp)r(er)g(triangular',)e(`)p Fg(L)p Fm(o)n(w)n(er)h(triangular')-316 +2161 y(DIA)n(G)160 b(=)24 b(`)p Fg(N)p Fm(on-unit)g(triangular',)f(`)p +Fg(U)p Fm(nit)g(triangular')-316 2240 y(SIDE)179 b(=)24 +b(`)p Fg(L)p Fm(eft',)e(`)p Fg(R)p Fm(igh)n(t')h(\(A)h(or)g(op\(A\))g +(on)g(the)h(left,)194 2319 y(or)e(A)h(or)f(op\(A\))i(on)f(the)g(righ)n +(t\))-366 2645 y(F)-6 b(or)24 b(real)f(matrices,)f(TRANSx)i(=)f(`T')g +(and)h(TRANSx)g(=)f(`C')g(ha)n(v)n(e)-366 2724 y(the)i(same)e(meaning.) 
+-366 2803 y(F)-6 b(or)24 b(Hermitian)e(matrices,)g(TRANSx)i(=)f(`T')g +(is)g(not)h(allo)n(w)n(ed.)-366 2882 y(F)-6 b(or)24 b(complex)f +(symmetric)f(matrices,)g(TRANSx)h(=)h(`H')e(is)h(not)-366 +2961 y(allo)n(w)n(ed.)1825 -244 y Ff(References)p 1825 +-231 449 4 v 1825 -86 a Fm(C.)g(La)n(wson,)h(R.)f(Hanson,)h(D.)f +(Kincaid,)g(and)h(F.)g(Krogh,)f(\\Basic)1825 -7 y(Linear)h(Algebra)f +(Subprograms)g(for)g(F)-6 b(ortran)24 b(Usage,")g Fe(A)n(CM)i(T)-5 +b(r)l(ans.)1825 71 y(on)26 b(Math.)33 b(Soft.)f Fm(5)23 +b(\(1979\))j(308-325)1825 229 y(J.J.)d(Dongarra,)h(J.)f(DuCroz,)g(S.)h +(Hammarling,)c(and)k(R.)f(Hanson,)1825 308 y(\\An)h(Extended)i(Set)e +(of)g(F)-6 b(ortran)23 b(Basic)h(Linear)g(Algebra)f(Subprograms,")1825 +387 y Fe(A)n(CM)j(T)-5 b(r)l(ans.)33 b(on)26 b(Math.)33 +b(Soft.)e Fm(14,1)24 b(\(1988\))i(1-32)1825 545 y(J.J.)d(Dongarra,)h +(I.)f(Du\013,)h(J.)f(DuCroz,)g(and)i(S.)e(Hammarling,)d(\\A)k(Set)1825 +624 y(of)g(Lev)n(el)g(3)g(Basic)f(Linear)h(Algebra)f(Subprograms,")g +Fe(A)n(CM)j(T)-5 b(r)l(ans.)1825 702 y(on)26 b(Math.)33 +b(Soft.)f Fm(\(1989\))1825 874 y Ff(Obtaining)h(the)g(Soft)m(w)m(are)g +(via)g Fd(netlib@ornl.gov)p 1825 910 1956 4 v 1825 1047 +a Fm(T)-6 b(o)24 b(receiv)n(e)h(a)e(cop)n(y)i(of)e(the)i +(single-precision)e(soft)n(w)n(are,)1825 1126 y(t)n(yp)r(e)i(in)e(a)h +(mail)e(message:)1861 1205 y Fn(send)36 b(sblas)h(from)f(blas)1861 +1284 y(send)g(sblas2)h(from)f(blas)1861 1362 y(send)g(sblas3)h(from)f +(blas)1825 1520 y Fm(T)-6 b(o)24 b(receiv)n(e)h(a)e(cop)n(y)i(of)e(the) +i(double-precision)f(soft)n(w)n(are,)1825 1599 y(t)n(yp)r(e)h(in)e(a)h +(mail)e(message:)1861 1678 y Fn(send)36 b(dblas)h(from)f(blas)1861 +1757 y(send)g(dblas2)h(from)f(blas)1861 1836 y(send)g(dblas3)h(from)f +(blas)1825 1993 y Fm(T)-6 b(o)24 b(receiv)n(e)h(a)e(cop)n(y)i(of)e(the) +i(complex)e(single-precision)g(soft)n(w)n(are,)1825 2072 +y(t)n(yp)r(e)i(in)e(a)h(mail)e(message:)1861 2151 y Fn(send)36 +b(cblas)h(from)f(blas)1861 2230 y(send)g(cblas2)h(from)f(blas)1861 
+2309 y(send)g(cblas3)h(from)f(blas)1825 2467 y Fm(T)-6 +b(o)24 b(receiv)n(e)h(a)e(cop)n(y)i(of)e(the)i(complex)e +(double-precision)h(soft)n(w)n(are,)1825 2546 y(t)n(yp)r(e)h(in)e(a)h +(mail)e(message:)1861 2624 y Fn(send)36 b(zblas)h(from)f(blas)1861 +2703 y(send)g(zblas2)h(from)f(blas)1861 2782 y(send)g(zblas3)h(from)f +(blas)1825 2940 y Fm(Send)25 b(commen)n(ts)d(and)j(questions)f(to)60 +b Fn(lapack@cs.utk.edu)40 b Fm(.)4217 237 y Fc(Basic)4217 +704 y(Linear)4217 1172 y(Algebra)4217 1655 y(Subprograms)4217 +2253 y Fb(A)54 b(Quic)l(k)g(Reference)e(Guide)4217 2501 +y Fa(Univ)m(ersit)m(y)34 b(of)h(T)-9 b(ennessee)4217 +2588 y(Oak)35 b(Ridge)f(National)f(Lab)s(oratory)4217 +2675 y(Numerical)g(Algorithms)h(Group)i(Ltd.)4217 2912 +y Fm(Ma)n(y)24 b(11,)g(1997)2488 4276 y Fp(3)p eop +%%Trailer +end +userdict /end-hook known{end-hook}if +%%EOF diff --git a/lapack-20010525.patch b/lapack-20010525.patch new file mode 100644 index 0000000..7952d07 --- /dev/null +++ b/lapack-20010525.patch @@ -0,0 +1,24100 @@ +diff -uNr LAPACK.orig/BLAS/TESTING/cblat2.f LAPACK/BLAS/TESTING/cblat2.f +--- LAPACK.orig/BLAS/TESTING/cblat2.f Thu Nov 4 14:23:26 1999 ++++ LAPACK/BLAS/TESTING/cblat2.f Fri May 25 15:57:46 2001 +@@ -64,6 +64,10 @@ + * Richard Hanson, Sandia National Labs. + * Jeremy Du Croz, NAG Central Office. + * ++* 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers ++* can be run multiple times without deleting generated ++* output files (susan) ++* + * .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) +@@ -126,7 +130,7 @@ + * + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT +- OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) ++ OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT + * + * Read name and unit number for snapshot output file and open file. 
+@@ -135,7 +139,7 @@ + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN +- OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) ++ OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF + * Read the flag that directs rewinding of the snapshot file. + READ( NIN, FMT = * )REWI +diff -uNr LAPACK.orig/BLAS/TESTING/cblat3.f LAPACK/BLAS/TESTING/cblat3.f +--- LAPACK.orig/BLAS/TESTING/cblat3.f Thu Nov 4 14:23:26 1999 ++++ LAPACK/BLAS/TESTING/cblat3.f Fri May 25 15:58:08 2001 +@@ -46,6 +46,10 @@ + * Jeremy Du Croz, Numerical Algorithms Group Ltd. + * Sven Hammarling, Numerical Algorithms Group Ltd. + * ++* 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers ++* can be run multiple times without deleting generated ++* output files (susan) ++* + * .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) +diff -uNr LAPACK.orig/BLAS/TESTING/dblat2.f LAPACK/BLAS/TESTING/dblat2.f +--- LAPACK.orig/BLAS/TESTING/dblat2.f Thu Nov 4 14:23:27 1999 ++++ LAPACK/BLAS/TESTING/dblat2.f Fri May 25 15:57:41 2001 +@@ -63,6 +63,10 @@ + * Richard Hanson, Sandia National Labs. + * Jeremy Du Croz, NAG Central Office. + * ++* 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers ++* can be run multiple times without deleting generated ++* output files (susan) ++* + * .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) +@@ -121,7 +125,7 @@ + * + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT +- OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) ++ OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT + * + * Read name and unit number for snapshot output file and open file. +@@ -130,7 +134,7 @@ + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN +- OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) ++ OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF + * Read the flag that directs rewinding of the snapshot file. 
+ READ( NIN, FMT = * )REWI +diff -uNr LAPACK.orig/BLAS/TESTING/dblat3.f LAPACK/BLAS/TESTING/dblat3.f +--- LAPACK.orig/BLAS/TESTING/dblat3.f Thu Nov 4 14:23:27 1999 ++++ LAPACK/BLAS/TESTING/dblat3.f Fri May 25 15:58:04 2001 +@@ -43,6 +43,10 @@ + * Jeremy Du Croz, Numerical Algorithms Group Ltd. + * Sven Hammarling, Numerical Algorithms Group Ltd. + * ++* 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers ++* can be run multiple times without deleting generated ++* output files (susan) ++* + * .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) +@@ -96,7 +100,7 @@ + * + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT +- OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) ++ OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT + * + * Read name and unit number for snapshot output file and open file. +@@ -105,7 +109,7 @@ + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN +- OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) ++ OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF + * Read the flag that directs rewinding of the snapshot file. + READ( NIN, FMT = * )REWI +diff -uNr LAPACK.orig/BLAS/TESTING/sblat2.f LAPACK/BLAS/TESTING/sblat2.f +--- LAPACK.orig/BLAS/TESTING/sblat2.f Thu Nov 4 14:23:26 1999 ++++ LAPACK/BLAS/TESTING/sblat2.f Fri May 25 15:57:34 2001 +@@ -63,6 +63,10 @@ + * Richard Hanson, Sandia National Labs. + * Jeremy Du Croz, NAG Central Office. + * ++* 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers ++* can be run multiple times without deleting generated ++* output files (susan) ++* + * .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) +@@ -121,7 +125,7 @@ + * + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT +- OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) ++ OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT + * + * Read name and unit number for snapshot output file and open file. 
+@@ -130,7 +134,7 @@ + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN +- OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) ++ OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF + * Read the flag that directs rewinding of the snapshot file. + READ( NIN, FMT = * )REWI +diff -uNr LAPACK.orig/BLAS/TESTING/sblat3.f LAPACK/BLAS/TESTING/sblat3.f +--- LAPACK.orig/BLAS/TESTING/sblat3.f Thu Nov 4 14:23:26 1999 ++++ LAPACK/BLAS/TESTING/sblat3.f Fri May 25 15:58:00 2001 +@@ -43,6 +43,10 @@ + * Jeremy Du Croz, Numerical Algorithms Group Ltd. + * Sven Hammarling, Numerical Algorithms Group Ltd. + * ++* 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers ++* can be run multiple times without deleting generated ++* output files (susan) ++* + * .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) +diff -uNr LAPACK.orig/BLAS/TESTING/zblat2.f LAPACK/BLAS/TESTING/zblat2.f +--- LAPACK.orig/BLAS/TESTING/zblat2.f Thu Nov 4 14:23:27 1999 ++++ LAPACK/BLAS/TESTING/zblat2.f Fri May 25 15:57:52 2001 +@@ -64,6 +64,10 @@ + * Richard Hanson, Sandia National Labs. + * Jeremy Du Croz, NAG Central Office. + * ++* 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers ++* can be run multiple times without deleting generated ++* output files (susan) ++* + * .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) +@@ -127,7 +131,7 @@ + * + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT +- OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) ++ OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT + * + * Read name and unit number for snapshot output file and open file. +@@ -136,7 +140,7 @@ + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN +- OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) ++ OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF + * Read the flag that directs rewinding of the snapshot file. 
+ READ( NIN, FMT = * )REWI +diff -uNr LAPACK.orig/BLAS/TESTING/zblat3.f LAPACK/BLAS/TESTING/zblat3.f +--- LAPACK.orig/BLAS/TESTING/zblat3.f Thu Nov 4 14:23:27 1999 ++++ LAPACK/BLAS/TESTING/zblat3.f Fri May 25 15:58:16 2001 +@@ -46,6 +46,10 @@ + * Jeremy Du Croz, Numerical Algorithms Group Ltd. + * Sven Hammarling, Numerical Algorithms Group Ltd. + * ++* 10-9-00: Change STATUS='NEW' to 'UNKNOWN' so that the testers ++* can be run multiple times without deleting generated ++* output files (susan) ++* + * .. Parameters .. + INTEGER NIN + PARAMETER ( NIN = 5 ) +@@ -104,7 +108,7 @@ + * + READ( NIN, FMT = * )SUMMRY + READ( NIN, FMT = * )NOUT +- OPEN( NOUT, FILE = SUMMRY, STATUS = 'NEW' ) ++ OPEN( NOUT, FILE = SUMMRY, STATUS = 'UNKNOWN' ) + NOUTC = NOUT + * + * Read name and unit number for snapshot output file and open file. +@@ -113,7 +117,7 @@ + READ( NIN, FMT = * )NTRA + TRACE = NTRA.GE.0 + IF( TRACE )THEN +- OPEN( NTRA, FILE = SNAPS, STATUS = 'NEW' ) ++ OPEN( NTRA, FILE = SNAPS, STATUS = 'UNKNOWN' ) + END IF + * Read the flag that directs rewinding of the snapshot file. + READ( NIN, FMT = * )REWI +@@ -1962,6 +1966,7 @@ + * 3-19-92: Initialize ALPHA, BETA, RALPHA, and RBETA (eca) + * 3-19-92: Fix argument 12 in calls to ZSYMM and ZHEMM + * with INFOT = 9 (eca) ++* 10-9-00: Declared INTRINSIC DCMPLX (susan) + * + * .. Scalar Arguments .. + INTEGER ISNUM, NOUT +@@ -1980,6 +1985,8 @@ + * .. External Subroutines .. + EXTERNAL ZGEMM, ZHEMM, ZHER2K, ZHERK, CHKXER, ZSYMM, + $ ZSYR2K, ZSYRK, ZTRMM, ZTRSM ++* .. Intrinsic Functions .. ++ INTRINSIC DCMPLX + * .. Common blocks .. + COMMON /INFOC/INFOT, NOUTC, OK, LERR + * .. Executable Statements .. +diff -uNr LAPACK.orig/INSTALL/make.inc.LINUX LAPACK/INSTALL/make.inc.LINUX +--- LAPACK.orig/INSTALL/make.inc.LINUX Thu Nov 4 14:23:30 1999 ++++ LAPACK/INSTALL/make.inc.LINUX Fri May 25 15:58:36 2001 +@@ -17,7 +17,7 @@ + # desired load options for your machine. 
+ # + FORTRAN = g77 +-OPTS = -funroll-all-loops -fno-f2c -O3 ++OPTS = -funroll-all-loops -O3 + DRVOPTS = $(OPTS) + NOOPT = + LOADER = g77 +diff -uNr LAPACK.orig/SRC/cbdsqr.f LAPACK/SRC/cbdsqr.f +--- LAPACK.orig/SRC/cbdsqr.f Thu Nov 4 14:23:31 1999 ++++ LAPACK/SRC/cbdsqr.f Fri May 25 15:59:05 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO +@@ -18,14 +18,26 @@ + * Purpose + * ======= + * +-* CBDSQR computes the singular value decomposition (SVD) of a real +-* N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P' +-* denotes the transpose of P), where S is a diagonal matrix with +-* non-negative diagonal elements (the singular values of B), and Q +-* and P are orthogonal matrices. +-* +-* The routine computes S, and optionally computes U * Q, P' * VT, +-* or Q' * C, for given complex input matrices U, VT, and C. ++* CBDSQR computes the singular values and, optionally, the right and/or ++* left singular vectors from the singular value decomposition (SVD) of ++* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit ++* zero-shift QR algorithm. The SVD of B has the form ++* ++* B = Q * S * P**H ++* ++* where S is the diagonal matrix of singular values, Q is an orthogonal ++* matrix of left singular vectors, and P is an orthogonal matrix of ++* right singular vectors. If left singular vectors are requested, this ++* subroutine actually returns U*Q instead of Q, and, if right singular ++* vectors are requested, this subroutine returns P**H*VT instead of ++* P**H, for given complex input matrices U and VT. When U and VT are ++* the unitary matrices that reduce a general matrix A to bidiagonal ++* form: A = U*B*VT, as computed by CGEBRD, then ++* ++* A = (U*Q) * S * (P**H*VT) ++* ++* is the SVD of A. 
Optionally, the subroutine may also compute Q**H*C ++* for a given complex input matrix C. + * + * See "Computing Small Singular Values of Bidiagonal Matrices With + * Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, +@@ -61,18 +73,17 @@ + * order. + * + * E (input/output) REAL array, dimension (N) +-* On entry, the elements of E contain the +-* offdiagonal elements of of the bidiagonal matrix whose SVD +-* is desired. On normal exit (INFO = 0), E is destroyed. +-* If the algorithm does not converge (INFO > 0), D and E ++* On entry, the N-1 offdiagonal elements of the bidiagonal ++* matrix B. ++* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E + * will contain the diagonal and superdiagonal elements of a + * bidiagonal matrix orthogonally equivalent to the one given + * as input. E(N) is used for workspace. + * + * VT (input/output) COMPLEX array, dimension (LDVT, NCVT) + * On entry, an N-by-NCVT matrix VT. +-* On exit, VT is overwritten by P' * VT. +-* VT is not referenced if NCVT = 0. ++* On exit, VT is overwritten by P**H * VT. ++* Not referenced if NCVT = 0. + * + * LDVT (input) INTEGER + * The leading dimension of the array VT. +@@ -81,21 +92,22 @@ + * U (input/output) COMPLEX array, dimension (LDU, N) + * On entry, an NRU-by-N matrix U. + * On exit, U is overwritten by U * Q. +-* U is not referenced if NRU = 0. ++* Not referenced if NRU = 0. + * + * LDU (input) INTEGER + * The leading dimension of the array U. LDU >= max(1,NRU). + * + * C (input/output) COMPLEX array, dimension (LDC, NCC) + * On entry, an N-by-NCC matrix C. +-* On exit, C is overwritten by Q' * C. +-* C is not referenced if NCC = 0. ++* On exit, C is overwritten by Q**H * C. ++* Not referenced if NCC = 0. + * + * LDC (input) INTEGER + * The leading dimension of the array C. + * LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. 
+ * +-* RWORK (workspace) REAL array, dimension (4*N) ++* RWORK (workspace) REAL array, dimension (2*N) ++* if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise + * + * INFO (output) INTEGER + * = 0: successful exit +diff -uNr LAPACK.orig/SRC/cgebd2.f LAPACK/SRC/cgebd2.f +--- LAPACK.orig/SRC/cgebd2.f Thu Nov 4 14:24:07 1999 ++++ LAPACK/SRC/cgebd2.f Fri May 25 15:59:27 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* May 7, 2001 + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, M, N +@@ -172,8 +172,9 @@ + * + * Apply H(i)' to A(i:m,i+1:n) from the left + * +- CALL CLARF( 'Left', M-I+1, N-I, A( I, I ), 1, +- $ CONJG( TAUQ( I ) ), A( I, I+1 ), LDA, WORK ) ++ IF( I.LT.N ) ++ $ CALL CLARF( 'Left', M-I+1, N-I, A( I, I ), 1, ++ $ CONJG( TAUQ( I ) ), A( I, I+1 ), LDA, WORK ) + A( I, I ) = D( I ) + * + IF( I.LT.N ) THEN +@@ -215,8 +216,9 @@ + * + * Apply G(i) to A(i+1:m,i:n) from the right + * +- CALL CLARF( 'Right', M-I, N-I+1, A( I, I ), LDA, TAUP( I ), +- $ A( MIN( I+1, M ), I ), LDA, WORK ) ++ IF( I.LT.M ) ++ $ CALL CLARF( 'Right', M-I, N-I+1, A( I, I ), LDA, ++ $ TAUP( I ), A( MIN( I+1, M ), I ), LDA, WORK ) + CALL CLACGV( N-I+1, A( I, I ), LDA ) + A( I, I ) = D( I ) + * +diff -uNr LAPACK.orig/SRC/cgees.f LAPACK/SRC/cgees.f +--- LAPACK.orig/SRC/cgees.f Thu Nov 4 14:24:08 1999 ++++ LAPACK/SRC/cgees.f Fri May 25 15:59:55 2001 +@@ -5,6 +5,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVS, SORT +@@ -89,10 +90,9 @@ + * The dimension of the array WORK. LWORK >= max(1,2*N). + * For good performance, LWORK must generally be larger. 
+ * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) REAL array, dimension (N) + * +@@ -120,11 +120,13 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, SCALEA, WANTST, WANTVS ++ LOGICAL SCALEA, WANTST, WANTVS + INTEGER HSWORK, I, IBAL, ICOND, IERR, IEVAL, IHI, ILO, + $ ITAU, IWRK, K, MAXB, MAXWRK, MINWRK + REAL ANRM, BIGNUM, CSCALE, EPS, S, SEP, SMLNUM +@@ -150,7 +152,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVS = LSAME( JOBVS, 'V' ) + WANTST = LSAME( SORT, 'S' ) + IF( ( .NOT.WANTVS ) .AND. ( .NOT.LSAME( JOBVS, 'N' ) ) ) THEN +@@ -177,7 +178,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'CGEHRD', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 2*N ) + IF( .NOT.WANTVS ) THEN +@@ -196,19 +197,17 @@ + MAXWRK = MAX( MAXWRK, HSWORK, 1 ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -12 + END IF +- IF( LWORK.LT.MINWRK .AND. 
.NOT.LQUERY ) THEN +- INFO = -12 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGEES ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/cgeesx.f LAPACK/SRC/cgeesx.f +--- LAPACK.orig/SRC/cgeesx.f Thu Nov 4 14:24:08 1999 ++++ LAPACK/SRC/cgeesx.f Fri May 25 16:00:18 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Do WS calculations if LWORK = -1 (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVS, SENSE, SORT +@@ -119,6 +120,10 @@ + * this routine. Note that 2*SDIM*(N-SDIM) <= N*N/2. + * For good performance, LWORK must generally be larger. + * ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. ++* + * RWORK (workspace) REAL array, dimension (N) + * + * BWORK (workspace) LOGICAL array, dimension (N) +@@ -144,6 +149,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + * .. +@@ -211,7 +218,7 @@ + * in the code.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'CGEHRD', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 2*N ) + IF( .NOT.WANTVS ) THEN +@@ -229,18 +236,24 @@ + HSWORK = MAX( K*( K+2 ), 2*N ) + MAXWRK = MAX( MAXWRK, HSWORK, 1 ) + END IF ++* ++* Estimate the workspace needed by CTRSEN. ++* ++ IF( WANTST ) THEN ++ MAXWRK = MAX( MAXWRK, (N*N+1)/2 ) ++ END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. 
LWORK.NE.LQUERV ) ++ & INFO = -15 + END IF +- IF( LWORK.LT.MINWRK ) THEN +- INFO = -15 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGEESX', -INFO ) + RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/cgeev.f LAPACK/SRC/cgeev.f +--- LAPACK.orig/SRC/cgeev.f Thu Nov 4 14:24:08 1999 ++++ LAPACK/SRC/cgeev.f Fri May 25 16:00:48 2001 +@@ -5,6 +5,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -85,10 +86,9 @@ + * The dimension of the array WORK. LWORK >= max(1,2*N). + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) REAL array, dimension (2*N) + * +@@ -103,11 +103,13 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + * .. + * .. Local Scalars .. 
+- LOGICAL LQUERY, SCALEA, WANTVL, WANTVR ++ LOGICAL SCALEA, WANTVL, WANTVR + CHARACTER SIDE + INTEGER HSWORK, I, IBAL, IERR, IHI, ILO, IRWORK, ITAU, + $ IWRK, K, MAXB, MAXWRK, MINWRK, NOUT +@@ -136,7 +138,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVL = LSAME( JOBVL, 'V' ) + WANTVR = LSAME( JOBVR, 'V' ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.LSAME( JOBVL, 'N' ) ) ) THEN +@@ -165,7 +166,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'CGEHRD', ' ', N, 1, N, 0 ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.WANTVR ) ) THEN + MINWRK = MAX( 1, 2*N ) +@@ -185,19 +186,17 @@ + MAXWRK = MAX( MAXWRK, HSWORK, 2*N ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -12 + END IF +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -12 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGEEV ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/cgeevx.f LAPACK/SRC/cgeevx.f +--- LAPACK.orig/SRC/cgeevx.f Thu Nov 4 14:24:08 1999 ++++ LAPACK/SRC/cgeevx.f Fri May 25 16:01:10 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER BALANC, JOBVL, JOBVR, SENSE +@@ -166,10 +167,9 @@ + * LWORK >= N*N+2*N. + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. 
++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) REAL array, dimension (2*N) + * +@@ -184,12 +184,14 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, SCALEA, WANTVL, WANTVR, WNTSNB, WNTSNE, +- $ WNTSNN, WNTSNV ++ LOGICAL SCALEA, WANTVL, WANTVR, WNTSNB, WNTSNE, WNTSNN, ++ $ WNTSNV + CHARACTER JOB, SIDE + INTEGER HSWORK, I, ICOND, IERR, ITAU, IWRK, K, MAXB, + $ MAXWRK, MINWRK, NOUT +@@ -219,7 +221,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVL = LSAME( JOBVL, 'V' ) + WANTVR = LSAME( JOBVR, 'V' ) + WNTSNN = LSAME( SENSE, 'N' ) +@@ -259,7 +260,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'CGEHRD', ' ', N, 1, N, 0 ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.WANTVR ) ) THEN + MINWRK = MAX( 1, 2*N ) +@@ -293,19 +294,17 @@ + MAXWRK = MAX( MAXWRK, 2*N, 1 ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -20 + END IF +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -20 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGEEVX', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/cgegs.f LAPACK/SRC/cgegs.f +--- LAPACK.orig/SRC/cgegs.f Thu Nov 4 14:24:08 1999 ++++ LAPACK/SRC/cgegs.f Fri May 25 16:01:59 2001 +@@ -5,7 +5,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR +@@ -23,83 +23,70 @@ + * + * This routine is deprecated and has been replaced by routine CGGES. + * +-* CGEGS computes for a pair of N-by-N complex nonsymmetric matrices A, +-* B: the generalized eigenvalues (alpha, beta), the complex Schur +-* form (A, B), and optionally left and/or right Schur vectors +-* (VSL and VSR). +-* +-* (If only the generalized eigenvalues are needed, use the driver CGEGV +-* instead.) +-* +-* A generalized eigenvalue for a pair of matrices (A,B) is, roughly +-* speaking, a scalar w or a ratio alpha/beta = w, such that A - w*B +-* is singular. It is usually represented as the pair (alpha,beta), +-* as there is a reasonable interpretation for beta=0, and even for +-* both being zero. A good beginning reference is the book, "Matrix +-* Computations", by G. Golub & C. van Loan (Johns Hopkins U. Press) +-* +-* The (generalized) Schur form of a pair of matrices is the result of +-* multiplying both matrices on the left by one unitary matrix and +-* both on the right by another unitary matrix, these two unitary +-* matrices being chosen so as to bring the pair of matrices into +-* upper triangular form with the diagonal elements of B being +-* non-negative real numbers (this is also called complex Schur form.) +-* +-* The left and right Schur vectors are the columns of VSL and VSR, +-* respectively, where VSL and VSR are the unitary matrices +-* which reduce A and B to Schur form: +-* +-* Schur form of (A,B) = ( (VSL)**H A (VSR), (VSL)**H B (VSR) ) ++* CGEGS computes the eigenvalues, Schur form, and, optionally, the ++* left and or/right Schur vectors of a complex matrix pair (A,B). 
++* Given two square matrices A and B, the generalized Schur ++* factorization has the form ++* ++* A = Q*S*Z**H, B = Q*T*Z**H ++* ++* where Q and Z are unitary matrices and S and T are upper triangular. ++* The columns of Q are the left Schur vectors ++* and the columns of Z are the right Schur vectors. ++* ++* If only the eigenvalues of (A,B) are needed, the driver routine ++* CGEGV should be used instead. See CGEGV for a description of the ++* eigenvalues of the generalized nonsymmetric eigenvalue problem ++* (GNEP). + * + * Arguments + * ========= + * + * JOBVSL (input) CHARACTER*1 + * = 'N': do not compute the left Schur vectors; +-* = 'V': compute the left Schur vectors. ++* = 'V': compute the left Schur vectors (returned in VSL). + * + * JOBVSR (input) CHARACTER*1 + * = 'N': do not compute the right Schur vectors; +-* = 'V': compute the right Schur vectors. ++* = 'V': compute the right Schur vectors (returned in VSR). + * + * N (input) INTEGER + * The order of the matrices A, B, VSL, and VSR. N >= 0. + * + * A (input/output) COMPLEX array, dimension (LDA, N) +-* On entry, the first of the pair of matrices whose generalized +-* eigenvalues and (optionally) Schur vectors are to be +-* computed. +-* On exit, the generalized Schur form of A. ++* On entry, the matrix A. ++* On exit, the upper triangular matrix S from the generalized ++* Schur factorization. + * + * LDA (input) INTEGER + * The leading dimension of A. LDA >= max(1,N). + * + * B (input/output) COMPLEX array, dimension (LDB, N) +-* On entry, the second of the pair of matrices whose +-* generalized eigenvalues and (optionally) Schur vectors are +-* to be computed. +-* On exit, the generalized Schur form of B. ++* On entry, the matrix B. ++* On exit, the upper triangular matrix T from the generalized ++* Schur factorization. + * + * LDB (input) INTEGER + * The leading dimension of B. LDB >= max(1,N). 
+ * + * ALPHA (output) COMPLEX array, dimension (N) ++* The complex scalars alpha that define the eigenvalues of ++* GNEP. ALPHA(j) = S(j,j), the diagonal element of the Schur ++* form of A. ++* + * BETA (output) COMPLEX array, dimension (N) +-* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the +-* generalized eigenvalues. ALPHA(j), j=1,...,N and BETA(j), +-* j=1,...,N are the diagonals of the complex Schur form (A,B) +-* output by CGEGS. The BETA(j) will be non-negative real. +-* +-* Note: the quotients ALPHA(j)/BETA(j) may easily over- or +-* underflow, and BETA(j) may even be zero. Thus, the user +-* should avoid naively computing the ratio alpha/beta. +-* However, ALPHA will be always less than and usually +-* comparable with norm(A) in magnitude, and BETA always less +-* than and usually comparable with norm(B). ++* The non-negative real scalars beta that define the ++* eigenvalues of GNEP. BETA(j) = T(j,j), the diagonal element ++* of the triangular factor T. ++* ++* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) ++* represent the j-th eigenvalue of the matrix pair (A,B), in ++* one of the forms lambda = alpha/beta or mu = beta/alpha. ++* Since either lambda or mu may overflow, they should not, ++* in general, be computed. + * + * VSL (output) COMPLEX array, dimension (LDVSL,N) +-* If JOBVSL = 'V', VSL will contain the left Schur vectors. +-* (See "Purpose", above.) ++* If JOBVSL = 'V', the matrix of left Schur vectors Q. + * Not referenced if JOBVSL = 'N'. + * + * LDVSL (input) INTEGER +@@ -107,8 +94,7 @@ + * if JOBVSL = 'V', LDVSL >= N. + * + * VSR (output) COMPLEX array, dimension (LDVSR,N) +-* If JOBVSR = 'V', VSR will contain the right Schur vectors. +-* (See "Purpose", above.) ++* If JOBVSR = 'V', the matrix of right Schur vectors Z. + * Not referenced if JOBVSR = 'N'. 
+ * + * LDVSR (input) INTEGER +diff -uNr LAPACK.orig/SRC/cgegv.f LAPACK/SRC/cgegv.f +--- LAPACK.orig/SRC/cgegv.f Thu Nov 4 14:24:08 1999 ++++ LAPACK/SRC/cgegv.f Fri May 25 16:02:21 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -22,22 +22,28 @@ + * + * This routine is deprecated and has been replaced by routine CGGEV. + * +-* CGEGV computes for a pair of N-by-N complex nonsymmetric matrices A +-* and B, the generalized eigenvalues (alpha, beta), and optionally, +-* the left and/or right generalized eigenvectors (VL and VR). +-* +-* A generalized eigenvalue for a pair of matrices (A,B) is, roughly +-* speaking, a scalar w or a ratio alpha/beta = w, such that A - w*B +-* is singular. It is usually represented as the pair (alpha,beta), +-* as there is a reasonable interpretation for beta=0, and even for +-* both being zero. A good beginning reference is the book, "Matrix +-* Computations", by G. Golub & C. van Loan (Johns Hopkins U. Press) +-* +-* A right generalized eigenvector corresponding to a generalized +-* eigenvalue w for a pair of matrices (A,B) is a vector r such +-* that (A - w B) r = 0 . A left generalized eigenvector is a vector +-* l such that l**H * (A - w B) = 0, where l**H is the +-* conjugate-transpose of l. ++* CGEGV computes the eigenvalues and, optionally, the left and/or right ++* eigenvectors of a complex matrix pair (A,B). ++* Given two square matrices A and B, ++* the generalized nonsymmetric eigenvalue problem (GNEP) is to find the ++* eigenvalues lambda and corresponding (non-zero) eigenvectors x such ++* that ++* A*x = lambda*B*x. ++* ++* An alternate form is to find the eigenvalues mu and corresponding ++* eigenvectors y such that ++* mu*A*y = B*y. 
++* ++* These two forms are equivalent with mu = 1/lambda and x = y if ++* neither lambda nor mu is zero. In order to deal with the case that ++* lambda or mu is zero or small, two values alpha and beta are returned ++* for each eigenvalue, such that lambda = alpha/beta and ++* mu = beta/alpha. ++* ++* The vectors x and y in the above equations are right eigenvectors of ++* the matrix pair (A,B). Vectors u and v satisfying ++* u**H*A = lambda*u**H*B or mu*v**H*A = v**H*B ++* are left eigenvectors of (A,B). + * + * Note: this routine performs "full balancing" on A and B -- see + * "Further Details", below. +@@ -47,56 +53,62 @@ + * + * JOBVL (input) CHARACTER*1 + * = 'N': do not compute the left generalized eigenvectors; +-* = 'V': compute the left generalized eigenvectors. ++* = 'V': compute the left generalized eigenvectors (returned ++* in VL). + * + * JOBVR (input) CHARACTER*1 + * = 'N': do not compute the right generalized eigenvectors; +-* = 'V': compute the right generalized eigenvectors. ++* = 'V': compute the right generalized eigenvectors (returned ++* in VR). + * + * N (input) INTEGER + * The order of the matrices A, B, VL, and VR. N >= 0. + * + * A (input/output) COMPLEX array, dimension (LDA, N) +-* On entry, the first of the pair of matrices whose +-* generalized eigenvalues and (optionally) generalized +-* eigenvectors are to be computed. +-* On exit, the contents will have been destroyed. (For a +-* description of the contents of A on exit, see "Further +-* Details", below.) ++* On entry, the matrix A. ++* If JOBVL = 'V' or JOBVR = 'V', then on exit A ++* contains the Schur form of A from the generalized Schur ++* factorization of the pair (A,B) after balancing. If no ++* eigenvectors were computed, then only the diagonal elements ++* of the Schur form will be correct. See CGGHRD and CHGEQZ ++* for details. + * + * LDA (input) INTEGER + * The leading dimension of A. LDA >= max(1,N). 
+ * + * B (input/output) COMPLEX array, dimension (LDB, N) +-* On entry, the second of the pair of matrices whose +-* generalized eigenvalues and (optionally) generalized +-* eigenvectors are to be computed. +-* On exit, the contents will have been destroyed. (For a +-* description of the contents of B on exit, see "Further +-* Details", below.) ++* On entry, the matrix B. ++* If JOBVL = 'V' or JOBVR = 'V', then on exit B contains the ++* upper triangular matrix obtained from B in the generalized ++* Schur factorization of the pair (A,B) after balancing. ++* If no eigenvectors were computed, then only the diagonal ++* elements of B will be correct. See CGGHRD and CHGEQZ for ++* details. + * + * LDB (input) INTEGER + * The leading dimension of B. LDB >= max(1,N). + * + * ALPHA (output) COMPLEX array, dimension (N) +-* BETA (output) COMPLEX array, dimension (N) +-* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the +-* generalized eigenvalues. ++* The complex scalars alpha that define the eigenvalues of ++* GNEP. + * +-* Note: the quotients ALPHA(j)/BETA(j) may easily over- or +-* underflow, and BETA(j) may even be zero. Thus, the user +-* should avoid naively computing the ratio alpha/beta. +-* However, ALPHA will be always less than and usually +-* comparable with norm(A) in magnitude, and BETA always less +-* than and usually comparable with norm(B). ++* BETA (output) COMPLEX array, dimension (N) ++* The complex scalars beta that define the eigenvalues of GNEP. ++* ++* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) ++* represent the j-th eigenvalue of the matrix pair (A,B), in ++* one of the forms lambda = alpha/beta or mu = beta/alpha. ++* Since either lambda or mu may overflow, they should not, ++* in general, be computed. ++ + * + * VL (output) COMPLEX array, dimension (LDVL,N) +-* If JOBVL = 'V', the left generalized eigenvectors. (See +-* "Purpose", above.) 
+-* Each eigenvector will be scaled so the largest component +-* will have abs(real part) + abs(imag. part) = 1, *except* +-* that for eigenvalues with alpha=beta=0, a zero vector will +-* be returned as the corresponding eigenvector. ++* If JOBVL = 'V', the left eigenvectors u(j) are stored ++* in the columns of VL, in the same order as their eigenvalues. ++* Each eigenvector is scaled so that its largest component has ++* abs(real part) + abs(imag. part) = 1, except for eigenvectors ++* corresponding to an eigenvalue with alpha = beta = 0, which ++* are set to zero. + * Not referenced if JOBVL = 'N'. + * + * LDVL (input) INTEGER +@@ -104,12 +116,12 @@ + * if JOBVL = 'V', LDVL >= N. + * + * VR (output) COMPLEX array, dimension (LDVR,N) +-* If JOBVR = 'V', the right generalized eigenvectors. (See +-* "Purpose", above.) +-* Each eigenvector will be scaled so the largest component +-* will have abs(real part) + abs(imag. part) = 1, *except* +-* that for eigenvalues with alpha=beta=0, a zero vector will +-* be returned as the corresponding eigenvector. ++* If JOBVR = 'V', the right eigenvectors x(j) are stored ++* in the columns of VR, in the same order as their eigenvalues. ++* Each eigenvector is scaled so that its largest component has ++* abs(real part) + abs(imag. part) = 1, except for eigenvectors ++* corresponding to an eigenvalue with alpha = beta = 0, which ++* are set to zero. + * Not referenced if JOBVR = 'N'. + * + * LDVR (input) INTEGER +diff -uNr LAPACK.orig/SRC/cgelsd.f LAPACK/SRC/cgelsd.f +--- LAPACK.orig/SRC/cgelsd.f Thu Nov 4 14:26:25 1999 ++++ LAPACK/SRC/cgelsd.f Fri May 25 16:03:27 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. 
+ INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -64,7 +65,8 @@ + * + * A (input/output) COMPLEX array, dimension (LDA,N) + * On entry, the M-by-N matrix A. +-* On exit, A has been destroyed. ++* On exit, the first min(m,n) rows of A are overwritten with ++* its right singular vectors, stored rowwise. + * + * LDA (input) INTEGER + * The leading dimension of the array A. LDA >= max(1,M). +@@ -96,32 +98,24 @@ + * On exit, if INFO = 0, WORK(1) returns the optimal LWORK. + * + * LWORK (input) INTEGER +-* The dimension of the array WORK. LWORK must be at least 1. ++* The dimension of the array WORK. LWORK >= 1. + * The exact minimum amount of workspace needed depends on M, +-* N and NRHS. As long as LWORK is at least +-* 2 * N + N * NRHS +-* if M is greater than or equal to N or +-* 2 * M + M * NRHS +-* if M is less than N, the code will execute correctly. ++* N and NRHS. ++* If M >= N, LWORK >= 2*N + N*NRHS. ++* If M < N, LWORK >= 2*M + M*NRHS. + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. +-* +-* +-* RWORK (workspace) REAL array, dimension at least +-* 10*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS + +-* (SMLSIZ+1)**2 +-* if M is greater than or equal to N or +-* 10*M + 2*M*SMLSIZ + 8*M*NLVL + 3*SMLSIZ*NRHS + +-* (SMLSIZ+1)**2 +-* if M is less than N, the code will execute correctly. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. ++* ++* RWORK (workspace) REAL array, dimension (LRWORK) ++* If M >= N, LRWORK >= 8*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS. ++* If M < N, LRWORK >= 8*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS. 
+ * SMLSIZ is returned by ILAENV and is equal to the maximum + * size of the subproblems at the bottom of the computation + * tree (usually about 25), and +-* NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) ++* NLVL = INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 + * + * IWORK (workspace) INTEGER array, dimension (LIWORK) + * LIWORK >= 3 * MINMN * NLVL + 11 * MINMN, +@@ -145,13 +139,14 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) + COMPLEX CZERO + PARAMETER ( CZERO = ( 0.0E+0, 0.0E+0 ) ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY + INTEGER IASCL, IBSCL, IE, IL, ITAU, ITAUP, ITAUQ, + $ LDWORK, MAXMN, MAXWRK, MINMN, MINWRK, MM, + $ MNTHR, NRWORK, NWORK, SMLSIZ +@@ -179,7 +174,6 @@ + MINMN = MIN( M, N ) + MAXMN = MAX( M, N ) + MNTHR = ILAENV( 6, 'CGELSD', ' ', M, N, NRHS, -1 ) +- LQUERY = ( LWORK.EQ.-1 ) + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN +@@ -263,20 +257,17 @@ + END IF + MINWRK = MIN( MINWRK, MAXWRK ) + WORK( 1 ) = CMPLX( MAXWRK, 0 ) +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -12 +- END IF ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -12 + END IF + * ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGELSD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- GO TO 10 + END IF +-* +-* Quick return if possible. +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( M.EQ.0 .OR. N.EQ.0 ) THEN + RANK = 0 + RETURN +diff -uNr LAPACK.orig/SRC/cgelss.f LAPACK/SRC/cgelss.f +--- LAPACK.orig/SRC/cgelss.f Thu Nov 4 14:24:09 1999 ++++ LAPACK/SRC/cgelss.f Fri May 25 16:03:50 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. 
+ INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -87,10 +87,9 @@ + * LWORK >= 2*min(M,N) + max(M,N,NRHS) + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) REAL array, dimension (5*min(M,N)) + * +@@ -164,7 +163,7 @@ + * immediately following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 0 + MM = M + IF( M.GE.N .AND. M.GE.MNTHR ) THEN +@@ -235,19 +234,18 @@ + MINWRK = MAX( MINWRK, 1 ) + MAXWRK = MAX( MINWRK, MAXWRK ) + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) ++ $ INFO = -12 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) +- $ INFO = -12 ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGELSS', -INFO ) + RETURN + ELSE IF( LQUERY ) THEN + RETURN + END IF +-* +-* Quick return if possible +-* + IF( M.EQ.0 .OR. 
N.EQ.0 ) THEN + RANK = 0 + RETURN +@@ -512,8 +510,8 @@ + DO 40 I = 1, NRHS, CHUNK + BL = MIN( NRHS-I+1, CHUNK ) + CALL CGEMM( 'C', 'N', M, BL, M, CONE, WORK( IL ), LDWORK, +- $ B( 1, I ), LDB, CZERO, WORK( IWORK ), N ) +- CALL CLACPY( 'G', M, BL, WORK( IWORK ), N, B( 1, I ), ++ $ B( 1, I ), LDB, CZERO, WORK( IWORK ), M ) ++ CALL CLACPY( 'G', M, BL, WORK( IWORK ), M, B( 1, I ), + $ LDB ) + 40 CONTINUE + ELSE +diff -uNr LAPACK.orig/SRC/cgesdd.f LAPACK/SRC/cgesdd.f +--- LAPACK.orig/SRC/cgesdd.f Thu Nov 11 20:32:54 1999 ++++ LAPACK/SRC/cgesdd.f Fri May 25 16:08:03 2001 +@@ -1,10 +1,11 @@ +- SUBROUTINE CGESDD( JOBZ, M, N, A, LDA, S, U, LDU, VT, LDVT, WORK, +- $ LWORK, RWORK, IWORK, INFO ) ++ SUBROUTINE CGESDD( JOBZ, M, N, A, LDA, S, U, LDU, VT, LDVT, ++ $ WORK, LWORK, RWORK, IWORK, INFO ) + * + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBZ +@@ -119,12 +120,14 @@ + * if JOBZ = 'S' or 'A', + * LWORK >= min(M,N)*min(M,N)+2*min(M,N)+max(M,N). + * For good performance, LWORK should generally be larger. +-* If LWORK < 0 but other input arguments are legal, WORK(1) +-* returns the optimal LWORK. ++* ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) REAL array, dimension (LRWORK) +-* If JOBZ = 'N', LRWORK >= 7*min(M,N). +-* Otherwise, LRWORK >= 5*min(M,N)*min(M,N) + 5*min(M,N) ++* If JOBZ = 'N', LRWORK >= 5*min(M,N). ++* Otherwise, LRWORK >= 5*min(M,N)*min(M,N) + 7*min(M,N) + * + * IWORK (workspace) INTEGER array, dimension (8*min(M,N)) + * +@@ -143,14 +146,16 @@ + * ===================================================================== + * + * .. 
Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + COMPLEX CZERO, CONE +- PARAMETER ( CZERO = ( 0.0E0, 0.0E0 ), +- $ CONE = ( 1.0E0, 0.0E0 ) ) ++ PARAMETER ( CZERO = ( 0.0E+0, 0.0E+0 ), ++ $ CONE = ( 1.0E+0, 0.0E+0 ) ) + REAL ZERO, ONE +- PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) ++ PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, WNTQA, WNTQAS, WNTQN, WNTQO, WNTQS ++ LOGICAL WNTQA, WNTQAS, WNTQN, WNTQO, WNTQS + INTEGER BLK, CHUNK, I, IE, IERR, IL, IR, IRU, IRVT, + $ ISCL, ITAU, ITAUP, ITAUQ, IU, IVT, LDWKVT, + $ LDWRKL, LDWRKR, LDWRKU, MAXWRK, MINMN, MINWRK, +@@ -162,15 +167,17 @@ + REAL DUM( 1 ) + * .. + * .. External Subroutines .. +- EXTERNAL CGEBRD, CGELQF, CGEMM, CGEQRF, CLACP2, CLACPY, +- $ CLACRM, CLARCM, CLASCL, CLASET, CUNGBR, CUNGLQ, +- $ CUNGQR, CUNMBR, SBDSDC, SLASCL, XERBLA ++ EXTERNAL CGEBRD, CGELQF, CGEMM, CGEQRF, ++ $ CLACP2, CLACPY, CLACRM, CLARCM, ++ $ CLASCL, CLASET, CUNGBR, CUNGLQ, ++ $ CUNGQR, CUNMBR, SBDSDC, SLASCL, ++ $ XERBLA + * .. + * .. External Functions .. + LOGICAL LSAME + INTEGER ILAENV + REAL CLANGE, SLAMCH +- EXTERNAL CLANGE, ILAENV, LSAME, SLAMCH ++ EXTERNAL CLANGE, SLAMCH, ILAENV, LSAME + * .. + * .. Intrinsic Functions .. + INTRINSIC INT, MAX, MIN, SQRT +@@ -181,8 +188,8 @@ + * + INFO = 0 + MINMN = MIN( M, N ) +- MNTHR1 = INT( MINMN*17.0E0 / 9.0E0 ) +- MNTHR2 = INT( MINMN*5.0E0 / 3.0E0 ) ++ MNTHR1 = INT( MINMN*17.0 / 9.0 ) ++ MNTHR2 = INT( MINMN*5.0 / 3.0 ) + WNTQA = LSAME( JOBZ, 'A' ) + WNTQS = LSAME( JOBZ, 'S' ) + WNTQAS = WNTQA .OR. WNTQS +@@ -190,7 +197,6 @@ + WNTQN = LSAME( JOBZ, 'N' ) + MINWRK = 1 + MAXWRK = 1 +- LQUERY = ( LWORK.EQ.-1 ) + * + IF( .NOT.( WNTQA .OR. WNTQS .OR. WNTQO .OR. 
WNTQN ) ) THEN + INFO = -1 +@@ -221,19 +227,21 @@ + IF( M.GE.N ) THEN + * + * There is no complex work space needed for bidiagonal SVD +-* The real work space needed for bidiagonal SVD is BDSPAC, +-* BDSPAC = 3*N*N + 4*N ++* The real work space needed for bidiagonal SVD is BDSPAC ++* for computing singular values and singular vectors; BDSPAN ++* for computing singular values only. ++* BDSPAC = 5*N*N + 7*N ++* BDSPAN = MAX(7*N+4, 3*N+2+SMLSIZ*(SMLSIZ+8)) + * + IF( M.GE.MNTHR1 ) THEN + IF( WNTQN ) THEN + * + * Path 1 (M much larger than N, JOBZ='N') + * +- WRKBL = N + N*ILAENV( 1, 'CGEQRF', ' ', M, N, -1, +- $ -1 ) +- WRKBL = MAX( WRKBL, 2*N+2*N* +- $ ILAENV( 1, 'CGEBRD', ' ', N, N, -1, -1 ) ) +- MAXWRK = WRKBL ++ MAXWRK = N + N*ILAENV( 1, 'CGEQRF', ' ', M, N, -1, ++ $ -1 ) ++ MAXWRK = MAX( MAXWRK, 2*N+2*N* ++ $ ILAENV( 1, 'CGEBRD', ' ', N, N, -1, -1 ) ) + MINWRK = 3*N + ELSE IF( WNTQO ) THEN + * +@@ -335,8 +343,11 @@ + ELSE + * + * There is no complex work space needed for bidiagonal SVD +-* The real work space needed for bidiagonal SVD is BDSPAC, +-* BDSPAC = 3*M*M + 4*M ++* The real work space needed for bidiagonal SVD is BDSPAC ++* for computing singular values and singular vectors; BDSPAN ++* for computing singular values only. ++* BDSPAC = 5*M*M + 7*M ++* BDSPAN = MAX(7*M+4, 3*M+2+SMLSIZ*(SMLSIZ+8)) + * + IF( N.GE.MNTHR1 ) THEN + IF( WNTQN ) THEN +@@ -447,24 +458,21 @@ + END IF + END IF + MAXWRK = MAX( MAXWRK, MINWRK ) ++ END IF ++ IF( INFO.EQ.0 ) THEN + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -13 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -13 +- END IF ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGESDD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( M.EQ.0 .OR. 
N.EQ.0 ) THEN +- IF( LWORK.GE.1 ) +- $ WORK( 1 ) = ONE + RETURN + END IF + * +@@ -529,7 +537,7 @@ + * + * Perform bidiagonal SVD, compute singular values only + * (CWorkspace: 0) +-* (RWorkspace: need BDSPAC) ++* (RWorkspace: need BDSPAN) + * + CALL SBDSDC( 'U', 'N', N, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( NRWORK ), IWORK, INFO ) +@@ -844,7 +852,7 @@ + * + * Compute singular values only + * (Cworkspace: 0) +-* (Rworkspace: need BDSPAC) ++* (Rworkspace: need BDSPAN) + * + CALL SBDSDC( 'U', 'N', N, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( NRWORK ), IWORK, INFO ) +@@ -1040,7 +1048,7 @@ + * + * Compute singular values only + * (Cworkspace: 0) +-* (Rworkspace: need BDSPAC) ++* (Rworkspace: need BDSPAN) + * + CALL SBDSDC( 'U', 'N', N, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( NRWORK ), IWORK, INFO ) +@@ -1205,8 +1213,8 @@ + ELSE + * + * A has more columns than rows. If A has sufficiently more +-* columns than rows, first reduce using the LQ decomposition +-* (if sufficient workspace available) ++* columns than rows, first reduce using the LQ decomposition (if ++* sufficient workspace available) + * + IF( N.GE.MNTHR1 ) THEN + * +@@ -1245,7 +1253,7 @@ + * + * Perform bidiagonal SVD, compute singular values only + * (CWorkspace: 0) +-* (RWorkspace: need BDSPAC) ++* (RWorkspace: need BDSPAN) + * + CALL SBDSDC( 'U', 'N', M, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( NRWORK ), IWORK, INFO ) +@@ -1531,8 +1539,8 @@ + * (CWorkspace: need M*M) + * (RWorkspace: 0) + * +- CALL CGEMM( 'N', 'N', M, N, M, CONE, WORK( IVT ), LDWKVT, +- $ VT, LDVT, CZERO, A, LDA ) ++ CALL CGEMM( 'N', 'N', M, N, M, CONE, WORK( IVT ), ++ $ LDWKVT, VT, LDVT, CZERO, A, LDA ) + * + * Copy right singular vectors of A from A to VT + * +@@ -1567,7 +1575,7 @@ + * + * Compute singular values only + * (Cworkspace: 0) +-* (Rworkspace: need BDSPAC) ++* (Rworkspace: need BDSPAN) + * + CALL SBDSDC( 'L', 'N', M, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( 
NRWORK ), IWORK, INFO ) +@@ -1763,7 +1771,7 @@ + * + * Compute singular values only + * (Cworkspace: 0) +-* (Rworkspace: need BDSPAC) ++* (Rworkspace: need BDSPAN) + * + CALL SBDSDC( 'L', 'N', M, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( NRWORK ), IWORK, INFO ) +@@ -1934,9 +1942,15 @@ + IF( ANRM.GT.BIGNUM ) + $ CALL SLASCL( 'G', 0, 0, BIGNUM, ANRM, MINMN, 1, S, MINMN, + $ IERR ) ++ IF( INFO.NE.0 .AND. ANRM.GT.BIGNUM ) ++ $ CALL SLASCL( 'G', 0, 0, BIGNUM, ANRM, MINMN-1, 1, ++ $ RWORK( IE ), MINMN, IERR ) + IF( ANRM.LT.SMLNUM ) + $ CALL SLASCL( 'G', 0, 0, SMLNUM, ANRM, MINMN, 1, S, MINMN, + $ IERR ) ++ IF( INFO.NE.0 .AND. ANRM.LT.SMLNUM ) ++ $ CALL SLASCL( 'G', 0, 0, SMLNUM, ANRM, MINMN-1, 1, ++ $ RWORK( IE ), MINMN, IERR ) + END IF + * + * Return optimal workspace in WORK(1) +diff -uNr LAPACK.orig/SRC/cgesvd.f LAPACK/SRC/cgesvd.f +--- LAPACK.orig/SRC/cgesvd.f Thu Nov 4 14:24:09 1999 ++++ LAPACK/SRC/cgesvd.f Fri May 25 16:08:29 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBU, JOBVT +@@ -114,12 +115,12 @@ + * LWORK >= 2*MIN(M,N)+MAX(M,N). + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. 
+ * +-* RWORK (workspace) REAL array, dimension (5*min(M,N)) ++* RWORK (workspace) REAL array, dimension ++* (5*min(M,N)) + * On exit, if INFO > 0, RWORK(1:MIN(M,N)-1) contains the + * unconverged superdiagonal elements of an upper bidiagonal + * matrix B whose diagonal is in S (not necessarily sorted). +@@ -137,6 +138,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + COMPLEX CZERO, CONE + PARAMETER ( CZERO = ( 0.0E0, 0.0E0 ), + $ CONE = ( 1.0E0, 0.0E0 ) ) +@@ -144,8 +147,8 @@ + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, WNTUA, WNTUAS, WNTUN, WNTUO, WNTUS, +- $ WNTVA, WNTVAS, WNTVN, WNTVO, WNTVS ++ LOGICAL WNTUA, WNTUAS, WNTUN, WNTUO, WNTUS, WNTVA, ++ $ WNTVAS, WNTVN, WNTVO, WNTVS + INTEGER BLK, CHUNK, I, IE, IERR, IR, IRWORK, ISCL, + $ ITAU, ITAUP, ITAUQ, IU, IWORK, LDWRKR, LDWRKU, + $ MAXWRK, MINMN, MINWRK, MNTHR, NCU, NCVT, NRU, +@@ -188,7 +191,7 @@ + WNTVO = LSAME( JOBVT, 'O' ) + WNTVN = LSAME( JOBVT, 'N' ) + MINWRK = 1 +- LQUERY = ( LWORK.EQ.-1 ) ++ MAXWRK = 1 + * + IF( .NOT.( WNTUA .OR. WNTUS .OR. WNTUO .OR. WNTUN ) ) THEN + INFO = -1 +@@ -216,8 +219,7 @@ + * real workspace. NB refers to the optimal block size for the + * immediately following subroutine, as returned by ILAENV.) + * +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) .AND. M.GT.0 .AND. +- $ N.GT.0 ) THEN ++ IF( INFO.EQ.0 .AND. M.GT.0 .AND. N.GT.0 ) THEN + IF( M.GE.N ) THEN + * + * Space needed for CBDSQR is BDSPAC = 5*N +@@ -543,24 +545,21 @@ + MAXWRK = MAX( MINWRK, MAXWRK ) + END IF + END IF ++ END IF ++ IF( INFO.EQ.0 ) THEN + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -13 + END IF + * +- IF( LWORK.LT.MINWRK .AND. 
.NOT.LQUERY ) THEN +- INFO = -13 +- END IF ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGESVD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( M.EQ.0 .OR. N.EQ.0 ) THEN +- IF( LWORK.GE.1 ) +- $ WORK( 1 ) = ONE + RETURN + END IF + * +diff -uNr LAPACK.orig/SRC/cggbak.f LAPACK/SRC/cggbak.f +--- LAPACK.orig/SRC/cggbak.f Thu Nov 4 14:24:10 1999 ++++ LAPACK/SRC/cggbak.f Fri May 25 16:09:01 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* February 1, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOB, SIDE +@@ -109,10 +109,15 @@ + INFO = -3 + ELSE IF( ILO.LT.1 ) THEN + INFO = -4 +- ELSE IF( IHI.LT.ILO .OR. IHI.GT.MAX( 1, N ) ) THEN ++ ELSE IF( N.EQ.0 .AND. IHI.EQ.0 .AND. ILO.NE.1 ) THEN ++ INFO = -4 ++ ELSE IF( N.GT.0 .AND. ( IHI.LT.ILO .OR. IHI.GT.MAX( 1, N ) ) ) ++ $ THEN ++ INFO = -5 ++ ELSE IF( N.EQ.0 .AND. ILO.EQ.1 .AND. IHI.NE.0 ) THEN + INFO = -5 + ELSE IF( M.LT.0 ) THEN +- INFO = -6 ++ INFO = -8 + ELSE IF( LDV.LT.MAX( 1, N ) ) THEN + INFO = -10 + END IF +diff -uNr LAPACK.orig/SRC/cggbal.f LAPACK/SRC/cggbal.f +--- LAPACK.orig/SRC/cggbal.f Thu Nov 4 14:24:10 1999 ++++ LAPACK/SRC/cggbal.f Fri May 25 16:09:22 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 12, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER JOB +@@ -150,7 +150,7 @@ + ELSE IF( LDA.LT.MAX( 1, N ) ) THEN + INFO = -4 + ELSE IF( LDB.LT.MAX( 1, N ) ) THEN +- INFO = -5 ++ INFO = -6 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGGBAL', -INFO ) +@@ -197,8 +197,8 @@ + IF( L.NE.1 ) + $ GO TO 30 + * +- RSCALE( 1 ) = 1 +- LSCALE( 1 ) = 1 ++ RSCALE( 1 ) = ONE ++ LSCALE( 1 ) = ONE + GO TO 190 + * + 30 CONTINUE +@@ -256,7 +256,7 @@ + * Permute rows M and I + * + 160 CONTINUE +- LSCALE( M ) = I ++ LSCALE( M ) = REAL( I ) + IF( I.EQ.M ) + $ GO TO 170 + CALL CSWAP( N-K+1, A( I, K ), LDA, A( M, K ), LDA ) +@@ -265,7 +265,7 @@ + * Permute columns M and J + * + 170 CONTINUE +- RSCALE( M ) = J ++ RSCALE( M ) = REAL( J ) + IF( J.EQ.M ) + $ GO TO 180 + CALL CSWAP( L, A( 1, J ), 1, A( 1, M ), 1 ) +@@ -437,7 +437,7 @@ + DO 360 I = ILO, IHI + IRAB = ICAMAX( N-ILO+1, A( I, ILO ), LDA ) + RAB = ABS( A( I, IRAB+ILO-1 ) ) +- IRAB = ICAMAX( N-ILO+1, B( I, ILO ), LDA ) ++ IRAB = ICAMAX( N-ILO+1, B( I, ILO ), LDB ) + RAB = MAX( RAB, ABS( B( I, IRAB+ILO-1 ) ) ) + LRAB = INT( LOG10( RAB+SFMIN ) / BASL+ONE ) + IR = LSCALE( I ) + SIGN( HALF, LSCALE( I ) ) +diff -uNr LAPACK.orig/SRC/cgges.f LAPACK/SRC/cgges.f +--- LAPACK.orig/SRC/cgges.f Thu Nov 4 14:26:17 1999 ++++ LAPACK/SRC/cgges.f Fri May 25 16:09:43 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR, SORT +@@ -145,10 +146,9 @@ + * The dimension of the array WORK. LWORK >= max(1,2*N). + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. 
The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) REAL array, dimension (8*N) + * +@@ -173,6 +173,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + COMPLEX CZERO, CONE +@@ -181,7 +183,7 @@ + * .. + * .. Local Scalars .. + LOGICAL CURSL, ILASCL, ILBSCL, ILVSL, ILVSR, LASTSL, +- $ LQUERY, WANTST ++ $ WANTST + INTEGER I, ICOLS, IERR, IHI, IJOBVL, IJOBVR, ILEFT, + $ ILO, IRIGHT, IROWS, IRWRK, ITAU, IWRK, LWKMIN, + $ LWKOPT +@@ -237,7 +239,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( IJOBVL.LE.0 ) THEN + INFO = -1 + ELSE IF( IJOBVR.LE.0 ) THEN +@@ -264,7 +265,7 @@ + * following subroutine, as returned by ILAENV.) + * + LWKMIN = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + LWKMIN = MAX( 1, 2*N ) + LWKOPT = N + N*ILAENV( 1, 'CGEQRF', ' ', N, 1, N, 0 ) + IF( ILVSL ) THEN +@@ -272,21 +273,17 @@ + $ -1 ) ) + END IF + WORK( 1 ) = LWKOPT ++ IF( LWORK.LT.LWKMIN .AND. LWORK.NE.LQUERV ) ++ $ INFO = -18 + END IF + * +- IF( LWORK.LT.LWKMIN .AND. .NOT.LQUERY ) +- $ INFO = -18 ++* Quick return if possible + * + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGGES ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* +- WORK( 1 ) = LWKOPT ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/cggesx.f LAPACK/SRC/cggesx.f +--- LAPACK.orig/SRC/cggesx.f Thu Nov 4 14:26:17 1999 ++++ LAPACK/SRC/cggesx.f Fri May 25 16:10:00 2001 +@@ -7,6 +7,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Do WS calculations if LWORK = -1 (eca) + * + * .. 
Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR, SENSE, SORT +@@ -167,6 +168,10 @@ + * If SENSE = 'E', 'V', or 'B', + * LWORK >= MAX(2*N, 2*SDIM*(N-SDIM)). + * ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. ++* + * RWORK (workspace) REAL array, dimension ( 8*N ) + * Real workspace. + * +@@ -198,6 +203,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) + COMPLEX CZERO, CONE +@@ -304,14 +311,22 @@ + * following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. LWORK.GE.1 ) THEN ++ IF( INFO.EQ.0 ) THEN + MINWRK = MAX( 1, 2*N ) + MAXWRK = N + N*ILAENV( 1, 'CGEQRF', ' ', N, 1, N, 0 ) + IF( ILVSL ) THEN + MAXWRK = MAX( MAXWRK, N+N*ILAENV( 1, 'CUNGQR', ' ', N, 1, N, + $ -1 ) ) + END IF ++* ++* Estimate the workspace needed by CTGSEN. ++* ++ IF( WANTST ) THEN ++ MAXWRK = MAX( MAXWRK, (N*N+1)/2 ) ++ END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -21 + END IF + IF( .NOT.WANTSN ) THEN + LIWMIN = N+2 +@@ -319,21 +334,18 @@ + LIWMIN = 1 + END IF + IWORK( 1 ) = LIWMIN +-* +- IF( INFO.EQ.0 .AND. LWORK.LT.MINWRK ) THEN +- INFO = -21 +- ELSE IF( INFO.EQ.0 .AND. IJOB.GE.1 ) THEN ++ IF( INFO.EQ.0 .AND. IJOB.GE.1 ) THEN + IF( LIWORK.LT.LIWMIN ) + $ INFO = -24 + END IF + * ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGGESX', -INFO ) + RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/cggev.f LAPACK/SRC/cggev.f +--- LAPACK.orig/SRC/cggev.f Thu Nov 4 14:26:17 1999 ++++ LAPACK/SRC/cggev.f Fri May 25 16:10:19 2001 +@@ -5,6 +5,7 @@ + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -113,10 +114,9 @@ + * The dimension of the array WORK. LWORK >= max(1,2*N). + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace/output) REAL array, dimension (8*N) + * +@@ -133,6 +133,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + COMPLEX CZERO, CONE +@@ -140,7 +142,7 @@ + $ CONE = ( 1.0E0, 0.0E0 ) ) + * .. + * .. Local Scalars .. +- LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, LQUERY ++ LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR + CHARACTER CHTEMP + INTEGER ICOLS, IERR, IHI, IJOBVL, IJOBVR, ILEFT, ILO, + $ IN, IRIGHT, IROWS, IRWRK, ITAU, IWRK, JC, JR, +@@ -202,7 +204,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( IJOBVL.LE.0 ) THEN + INFO = -1 + ELSE IF( IJOBVR.LE.0 ) THEN +@@ -228,25 +229,21 @@ + * computed assuming ILO = 1 and IHI = N, the worst case.) + * + LWKMIN = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + LWKOPT = N + N*ILAENV( 1, 'CGEQRF', ' ', N, 1, N, 0 ) + LWKMIN = MAX( 1, 2*N ) + WORK( 1 ) = LWKOPT ++ IF( LWORK.LT.LWKMIN .AND. LWORK.NE.LQUERV ) ++ $ INFO = -15 + END IF + * +- IF( LWORK.LT.LWKMIN .AND. 
.NOT.LQUERY ) +- $ INFO = -15 ++* Quick returns + * + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGGEV ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* +- WORK( 1 ) = LWKOPT ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/cggevx.f LAPACK/SRC/cggevx.f +--- LAPACK.orig/SRC/cggevx.f Thu Nov 4 14:26:17 1999 ++++ LAPACK/SRC/cggevx.f Fri May 25 16:11:36 2001 +@@ -7,6 +7,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER BALANC, JOBVL, JOBVR, SENSE +@@ -194,10 +195,9 @@ + * If SENSE = 'N' or 'E', LWORK >= 2*N. + * If SENSE = 'V' or 'B', LWORK >= 2*N*N+2*N. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) REAL array, dimension (6*N) + * Real workspace. +@@ -247,6 +247,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) + COMPLEX CZERO, CONE +@@ -254,8 +256,8 @@ + $ CONE = ( 1.0E+0, 0.0E+0 ) ) + * .. + * .. Local Scalars .. 
+- LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, LQUERY, +- $ WANTSB, WANTSE, WANTSN, WANTSV ++ LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, WANTSB, ++ $ WANTSE, WANTSN, WANTSV + CHARACTER CHTEMP + INTEGER I, ICOLS, IERR, IJOBVL, IJOBVR, IN, IROWS, + $ ITAU, IWRK, IWRK1, J, JC, JR, M, MAXWRK, MINWRK +@@ -321,7 +323,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( .NOT.( LSAME( BALANC, 'N' ) .OR. LSAME( BALANC, + $ 'S' ) .OR. LSAME( BALANC, 'P' ) .OR. LSAME( BALANC, 'B' ) ) ) + $ THEN +@@ -354,7 +355,7 @@ + * computed assuming ILO = 1 and IHI = N, the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'CGEQRF', ' ', N, 1, N, 0 ) + IF( WANTSE ) THEN + MINWRK = MAX( 1, 2*N ) +@@ -363,21 +364,17 @@ + MAXWRK = MAX( MAXWRK, 2*N*N+2*N ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -25 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -25 +- END IF ++* Quick returns + * + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGGEVX', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/cgghrd.f LAPACK/SRC/cgghrd.f +--- LAPACK.orig/SRC/cgghrd.f Thu Nov 4 14:25:42 1999 ++++ LAPACK/SRC/cgghrd.f Fri May 25 16:11:54 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 26, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER COMPQ, COMPZ +@@ -20,16 +20,29 @@ + * + * CGGHRD reduces a pair of complex matrices (A,B) to generalized upper + * Hessenberg form using unitary transformations, where A is a +-* general matrix and B is upper triangular: Q' * A * Z = H and +-* Q' * B * Z = T, where H is upper Hessenberg, T is upper triangular, +-* and Q and Z are unitary, and ' means conjugate transpose. ++* general matrix and B is upper triangular. The form of the generalized ++* eigenvalue problem is ++* A*x = lambda*B*x, ++* and B is typically made upper triangular by computing its QR ++* factorization and moving the unitary matrix Q to the left side ++* of the equation. ++* ++* This subroutine simultaneously reduces A to a Hessenberg matrix H: ++* Q**H*A*Z = H ++* and transforms B to another upper triangular matrix T: ++* Q**H*B*Z = T ++* in order to reduce the problem to its standard form ++* H*y = lambda*T*y ++* where y = Z**H*x. + * + * The unitary matrices Q and Z are determined as products of Givens + * rotations. They may either be formed explicitly, or they may be + * postmultiplied into input matrices Q1 and Z1, so that +-* +-* Q1 * A * Z1' = (Q1*Q) * H * (Z1*Z)' +-* Q1 * B * Z1' = (Q1*Q) * T * (Z1*Z)' ++* Q1 * A * Z1**H = (Q1*Q) * H * (Z1*Z)**H ++* Q1 * B * Z1**H = (Q1*Q) * T * (Z1*Z)**H ++* If Q1 is the unitary matrix from the QR factorization of B in the ++* original equation A*x = lambda*B*x, then CGGHRD reduces the original ++* problem to generalized Hessenberg form. + * + * Arguments + * ========= +@@ -53,10 +66,11 @@ + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set +-* by a previous call to CGGBAL; otherwise they should be set +-* to 1 and N respectively. ++* ILO and IHI mark the rows and columns of A which are to be ++* reduced. It is assumed that A is already upper triangular ++* in rows and columns 1:ILO-1 and IHI+1:N. 
ILO and IHI are ++* normally set by a previous call to CGGBAL; otherwise they ++* should be set to 1 and N respectively. + * 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. + * + * A (input/output) COMPLEX array, dimension (LDA, N) +@@ -70,33 +84,28 @@ + * + * B (input/output) COMPLEX array, dimension (LDB, N) + * On entry, the N-by-N upper triangular matrix B. +-* On exit, the upper triangular matrix T = Q' B Z. The ++* On exit, the upper triangular matrix T = Q**H B Z. The + * elements below the diagonal are set to zero. + * + * LDB (input) INTEGER + * The leading dimension of the array B. LDB >= max(1,N). + * + * Q (input/output) COMPLEX array, dimension (LDQ, N) +-* If COMPQ='N': Q is not referenced. +-* If COMPQ='I': on entry, Q need not be set, and on exit it +-* contains the unitary matrix Q, where Q' +-* is the product of the Givens transformations +-* which are applied to A and B on the left. +-* If COMPQ='V': on entry, Q must contain a unitary matrix +-* Q1, and on exit this is overwritten by Q1*Q. ++* On entry, if COMPQ = 'V', the unitary matrix Q1, typically ++* from the QR factorization of B. ++* On exit, if COMPQ='I', the unitary matrix Q, and if ++* COMPQ = 'V', the product Q1*Q. ++* Not referenced if COMPQ='N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. + * LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. + * + * Z (input/output) COMPLEX array, dimension (LDZ, N) +-* If COMPZ='N': Z is not referenced. +-* If COMPZ='I': on entry, Z need not be set, and on exit it +-* contains the unitary matrix Z, which is +-* the product of the Givens transformations +-* which are applied to A and B on the right. +-* If COMPZ='V': on entry, Z must contain a unitary matrix +-* Z1, and on exit this is overwritten by Z1*Z. ++* On entry, if COMPZ = 'V', the unitary matrix Z1. ++* On exit, if COMPZ='I', the unitary matrix Z, and if ++* COMPZ = 'V', the product Z1*Z. ++* Not referenced if COMPZ='N'. 
+ * + * LDZ (input) INTEGER + * The leading dimension of the array Z. +diff -uNr LAPACK.orig/SRC/chbgst.f LAPACK/SRC/chbgst.f +--- LAPACK.orig/SRC/chbgst.f Thu Nov 4 14:23:31 1999 ++++ LAPACK/SRC/chbgst.f Fri May 25 16:12:55 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* January 9, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO, VECT +@@ -131,7 +131,7 @@ + INFO = -3 + ELSE IF( KA.LT.0 ) THEN + INFO = -4 +- ELSE IF( KB.LT.0 ) THEN ++ ELSE IF( KB.LT.0 .OR. KB.GT.KA ) THEN + INFO = -5 + ELSE IF( LDAB.LT.KA+1 ) THEN + INFO = -7 +diff -uNr LAPACK.orig/SRC/chgeqz.f LAPACK/SRC/chgeqz.f +--- LAPACK.orig/SRC/chgeqz.f Thu Nov 4 14:24:13 1999 ++++ LAPACK/SRC/chgeqz.f Fri May 25 16:12:16 2001 +@@ -1,43 +1,64 @@ +- SUBROUTINE CHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, A, LDA, B, LDB, ++ SUBROUTINE CHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, H, LDH, T, LDT, + $ ALPHA, BETA, Q, LDQ, Z, LDZ, WORK, LWORK, + $ RWORK, INFO ) + * + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ, JOB +- INTEGER IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, LWORK, N ++ INTEGER IHI, ILO, INFO, LDH, LDQ, LDT, LDZ, LWORK, N + * .. + * .. Array Arguments .. + REAL RWORK( * ) +- COMPLEX A( LDA, * ), ALPHA( * ), B( LDB, * ), +- $ BETA( * ), Q( LDQ, * ), WORK( * ), Z( LDZ, * ) ++ COMPLEX ALPHA( * ), BETA( * ), H( LDH, * ), ++ $ Q( LDQ, * ), T( LDT, * ), WORK( * ), ++ $ Z( LDZ, * ) + * .. 
+ * + * Purpose + * ======= + * +-* CHGEQZ implements a single-shift version of the QZ +-* method for finding the generalized eigenvalues w(i)=ALPHA(i)/BETA(i) +-* of the equation +-* +-* det( A - w(i) B ) = 0 +-* +-* If JOB='S', then the pair (A,B) is simultaneously +-* reduced to Schur form (i.e., A and B are both upper triangular) by +-* applying one unitary tranformation (usually called Q) on the left and +-* another (usually called Z) on the right. The diagonal elements of +-* A are then ALPHA(1),...,ALPHA(N), and of B are BETA(1),...,BETA(N). +-* +-* If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the unitary +-* transformations used to reduce (A,B) are accumulated into the arrays +-* Q and Z s.t.: +-* +-* Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)* +-* Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)* ++* CHGEQZ computes the eigenvalues of a complex matrix pair (H,T), ++* where H is an upper Hessenberg matrix and T is upper triangular, ++* using the single-shift QZ method. ++* Matrix pairs of this type are produced by the reduction to ++* generalized upper Hessenberg form of a complex matrix pair (A,B): ++* ++* A = Q1*H*Z1**H, B = Q1*T*Z1**H, ++* ++* as computed by CGGHRD. ++* ++* If JOB='S', then the Hessenberg-triangular pair (H,T) is ++* also reduced to generalized Schur form, ++* ++* H = Q*S*Z**H, T = Q*P*Z**H, ++* ++* where Q and Z are unitary matrices and S and P are upper triangular. ++* ++* Optionally, the unitary matrix Q from the generalized Schur ++* factorization may be postmultiplied into an input matrix Q1, and the ++* unitary matrix Z may be postmultiplied into an input matrix Z1. ++* If Q1 and Z1 are the unitary matrices from CGGHRD that reduced ++* the matrix pair (A,B) to generalized Hessenberg form, then the output ++* matrices Q1*Q and Z1*Z are the unitary factors from the generalized ++* Schur factorization of (A,B): ++* ++* A = (Q1*Q)*S*(Z1*Z)**H, B = (Q1*Q)*P*(Z1*Z)**H. 
++* ++* To avoid overflow, eigenvalues of the matrix pair (H,T) ++* (equivalently, of (A,B)) are computed as a pair of complex values ++* (alpha,beta). If beta is nonzero, lambda = alpha / beta is an ++* eigenvalue of the generalized nonsymmetric eigenvalue problem (GNEP) ++* A*x = lambda*B*x ++* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the ++* alternate form of the GNEP ++* mu*A*y = B*y. ++* The values of alpha and beta for the i-th eigenvalue can be read ++* directly from the generalized Schur form: alpha = S(i,i), ++* beta = P(i,i). + * + * Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix + * Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), +@@ -47,83 +68,88 @@ + * ========= + * + * JOB (input) CHARACTER*1 +-* = 'E': compute only ALPHA and BETA. A and B will not +-* necessarily be put into generalized Schur form. +-* = 'S': put A and B into generalized Schur form, as well +-* as computing ALPHA and BETA. ++* = 'E': Compute eigenvalues only; ++* = 'S': Computer eigenvalues and the Schur form. + * + * COMPQ (input) CHARACTER*1 +-* = 'N': do not modify Q. +-* = 'V': multiply the array Q on the right by the conjugate +-* transpose of the unitary tranformation that is +-* applied to the left side of A and B to reduce them +-* to Schur form. +-* = 'I': like COMPQ='V', except that Q will be initialized to +-* the identity first. ++* = 'N': Left Schur vectors (Q) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Q ++* of left Schur vectors of (H,T) is returned; ++* = 'V': Q must contain a unitary matrix Q1 on entry and ++* the product Q1*Q is returned. + * + * COMPZ (input) CHARACTER*1 +-* = 'N': do not modify Z. +-* = 'V': multiply the array Z on the right by the unitary +-* tranformation that is applied to the right side of +-* A and B to reduce them to Schur form. +-* = 'I': like COMPZ='V', except that Z will be initialized to +-* the identity first. 
++* = 'N': Right Schur vectors (Z) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Z ++* of right Schur vectors of (H,T) is returned; ++* = 'V': Z must contain a unitary matrix Z1 on entry and ++* the product Z1*Z is returned. + * + * N (input) INTEGER +-* The order of the matrices A, B, Q, and Z. N >= 0. ++* The order of the matrices H, T, Q, and Z. N >= 0. + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. +-* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. +-* +-* A (input/output) COMPLEX array, dimension (LDA, N) +-* On entry, the N-by-N upper Hessenberg matrix A. Elements +-* below the subdiagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to upper triangular form. +-* If JOB='E', then on exit A will have been destroyed. +-* +-* LDA (input) INTEGER +-* The leading dimension of the array A. LDA >= max( 1, N ). +-* +-* B (input/output) COMPLEX array, dimension (LDB, N) +-* On entry, the N-by-N upper triangular matrix B. Elements +-* below the diagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to upper triangular form. +-* If JOB='E', then on exit B will have been destroyed. ++* ILO and IHI mark the rows and columns of H which are in ++* Hessenberg form. It is assumed that A is already upper ++* triangular in rows and columns 1:ILO-1 and IHI+1:N. ++* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. ++* ++* H (input/output) COMPLEX array, dimension (LDH, N) ++* On entry, the N-by-N upper Hessenberg matrix H. ++* On exit, if JOB = 'S', H contains the upper triangular ++* matrix S from the generalized Schur factorization. ++* If JOB = 'E', the diagonal of H matches that of S, but ++* the rest of H is unspecified. ++* ++* LDH (input) INTEGER ++* The leading dimension of the array H. LDH >= max( 1, N ). 
++* ++* T (input/output) COMPLEX array, dimension (LDT, N) ++* On entry, the N-by-N upper triangular matrix T. ++* On exit, if JOB = 'S', T contains the upper triangular ++* matrix P from the generalized Schur factorization. ++* If JOB = 'E', the diagonal of T matches that of P, but ++* the rest of T is unspecified. + * +-* LDB (input) INTEGER +-* The leading dimension of the array B. LDB >= max( 1, N ). ++* LDT (input) INTEGER ++* The leading dimension of the array T. LDT >= max( 1, N ). + * + * ALPHA (output) COMPLEX array, dimension (N) +-* The diagonal elements of A when the pair (A,B) has been +-* reduced to Schur form. ALPHA(i)/BETA(i) i=1,...,N +-* are the generalized eigenvalues. ++* The complex scalars alpha that define the eigenvalues of ++* GNEP. ALPHA(i) = S(i,i) in the generalized Schur ++* factorization. + * + * BETA (output) COMPLEX array, dimension (N) +-* The diagonal elements of B when the pair (A,B) has been +-* reduced to Schur form. ALPHA(i)/BETA(i) i=1,...,N +-* are the generalized eigenvalues. A and B are normalized +-* so that BETA(1),...,BETA(N) are non-negative real numbers. ++* The real non-negative scalars beta that define the ++* eigenvalues of GNEP. BETA(i) = P(i,i) in the generalized ++* Schur factorization. ++* ++* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) ++* represent the j-th eigenvalue of the matrix pair (A,B), in ++* one of the forms lambda = alpha/beta or mu = beta/alpha. ++* Since either lambda or mu may overflow, they should not, ++* in general, be computed. + * + * Q (input/output) COMPLEX array, dimension (LDQ, N) +-* If COMPQ='N', then Q will not be referenced. +-* If COMPQ='V' or 'I', then the conjugate transpose of the +-* unitary transformations which are applied to A and B on +-* the left will be applied to the array Q on the right. ++* On entry, if COMPZ = 'V', the unitary matrix Q1 used in the ++* reduction of (A,B) to generalized Hessenberg form. 
++* On exit, if COMPZ = 'I', the unitary matrix of left Schur ++* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of ++* left Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. LDQ >= 1. + * If COMPQ='V' or 'I', then LDQ >= N. + * + * Z (input/output) COMPLEX array, dimension (LDZ, N) +-* If COMPZ='N', then Z will not be referenced. +-* If COMPZ='V' or 'I', then the unitary transformations which +-* are applied to A and B on the right will be applied to the +-* array Z on the right. ++* On entry, if COMPZ = 'V', the unitary matrix Z1 used in the ++* reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the unitary matrix of right Schur ++* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of ++* right Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. LDZ >= 1. +@@ -145,13 +171,12 @@ + * INFO (output) INTEGER + * = 0: successful exit + * < 0: if INFO = -i, the i-th argument had an illegal value +-* = 1,...,N: the QZ iteration did not converge. (A,B) is not ++* = 1,...,N: the QZ iteration did not converge. (H,T) is not + * in Schur form, but ALPHA(i) and BETA(i), + * i=INFO+1,...,N should be correct. +-* = N+1,...,2*N: the shift calculation failed. (A,B) is not ++* = N+1,...,2*N: the shift calculation failed. (H,T) is not + * in Schur form, but ALPHA(i) and BETA(i), + * i=INFO-N+1,...,N should be correct. +-* > 2*N: various "impossible" errors. + * + * Further Details + * =============== +@@ -178,7 +203,7 @@ + REAL ABSB, ANORM, ASCALE, ATOL, BNORM, BSCALE, BTOL, + $ C, SAFMIN, TEMP, TEMP2, TEMPR, ULP + COMPLEX ABI22, AD11, AD12, AD21, AD22, CTEMP, CTEMP2, +- $ CTEMP3, ESHIFT, RTDISC, S, SHIFT, SIGNBC, T, ++ $ CTEMP3, ESHIFT, RTDISC, S, SHIFT, SIGNBC, T1, + $ U12, X + * .. + * .. External Functions .. +@@ -255,9 +280,9 @@ + INFO = -5 + ELSE IF( IHI.GT.N .OR. 
IHI.LT.ILO-1 ) THEN + INFO = -6 +- ELSE IF( LDA.LT.N ) THEN ++ ELSE IF( LDH.LT.N ) THEN + INFO = -8 +- ELSE IF( LDB.LT.N ) THEN ++ ELSE IF( LDT.LT.N ) THEN + INFO = -10 + ELSE IF( LDQ.LT.1 .OR. ( ILQ .AND. LDQ.LT.N ) ) THEN + INFO = -14 +@@ -293,8 +318,8 @@ + IN = IHI + 1 - ILO + SAFMIN = SLAMCH( 'S' ) + ULP = SLAMCH( 'E' )*SLAMCH( 'B' ) +- ANORM = CLANHS( 'F', IN, A( ILO, ILO ), LDA, RWORK ) +- BNORM = CLANHS( 'F', IN, B( ILO, ILO ), LDB, RWORK ) ++ ANORM = CLANHS( 'F', IN, H( ILO, ILO ), LDH, RWORK ) ++ BNORM = CLANHS( 'F', IN, T( ILO, ILO ), LDT, RWORK ) + ATOL = MAX( SAFMIN, ULP*ANORM ) + BTOL = MAX( SAFMIN, ULP*BNORM ) + ASCALE = ONE / MAX( SAFMIN, ANORM ) +@@ -304,23 +329,23 @@ + * Set Eigenvalues IHI+1:N + * + DO 10 J = IHI + 1, N +- ABSB = ABS( B( J, J ) ) ++ ABSB = ABS( T( J, J ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = CONJG( B( J, J ) / ABSB ) +- B( J, J ) = ABSB ++ SIGNBC = CONJG( T( J, J ) / ABSB ) ++ T( J, J ) = ABSB + IF( ILSCHR ) THEN +- CALL CSCAL( J-1, SIGNBC, B( 1, J ), 1 ) +- CALL CSCAL( J, SIGNBC, A( 1, J ), 1 ) ++ CALL CSCAL( J-1, SIGNBC, T( 1, J ), 1 ) ++ CALL CSCAL( J, SIGNBC, H( 1, J ), 1 ) + ELSE +- A( J, J ) = A( J, J )*SIGNBC ++ H( J, J ) = H( J, J )*SIGNBC + END IF + IF( ILZ ) + $ CALL CSCAL( N, SIGNBC, Z( 1, J ), 1 ) + ELSE +- B( J, J ) = CZERO ++ T( J, J ) = CZERO + END IF +- ALPHA( J ) = A( J, J ) +- BETA( J ) = B( J, J ) ++ ALPHA( J ) = H( J, J ) ++ BETA( J ) = T( J, J ) + 10 CONTINUE + * + * If IHI < ILO, skip QZ steps +@@ -365,22 +390,22 @@ + * Split the matrix if possible. 
+ * + * Two tests: +-* 1: A(j,j-1)=0 or j=ILO +-* 2: B(j,j)=0 ++* 1: H(j,j-1)=0 or j=ILO ++* 2: T(j,j)=0 + * + * Special case: j=ILAST + * + IF( ILAST.EQ.ILO ) THEN + GO TO 60 + ELSE +- IF( ABS1( A( ILAST, ILAST-1 ) ).LE.ATOL ) THEN +- A( ILAST, ILAST-1 ) = CZERO ++ IF( ABS1( H( ILAST, ILAST-1 ) ).LE.ATOL ) THEN ++ H( ILAST, ILAST-1 ) = CZERO + GO TO 60 + END IF + END IF + * +- IF( ABS( B( ILAST, ILAST ) ).LE.BTOL ) THEN +- B( ILAST, ILAST ) = CZERO ++ IF( ABS( T( ILAST, ILAST ) ).LE.BTOL ) THEN ++ T( ILAST, ILAST ) = CZERO + GO TO 50 + END IF + * +@@ -388,30 +413,30 @@ + * + DO 40 J = ILAST - 1, ILO, -1 + * +-* Test 1: for A(j,j-1)=0 or j=ILO ++* Test 1: for H(j,j-1)=0 or j=ILO + * + IF( J.EQ.ILO ) THEN + ILAZRO = .TRUE. + ELSE +- IF( ABS1( A( J, J-1 ) ).LE.ATOL ) THEN +- A( J, J-1 ) = CZERO ++ IF( ABS1( H( J, J-1 ) ).LE.ATOL ) THEN ++ H( J, J-1 ) = CZERO + ILAZRO = .TRUE. + ELSE + ILAZRO = .FALSE. + END IF + END IF + * +-* Test 2: for B(j,j)=0 ++* Test 2: for T(j,j)=0 + * +- IF( ABS( B( J, J ) ).LT.BTOL ) THEN +- B( J, J ) = CZERO ++ IF( ABS( T( J, J ) ).LT.BTOL ) THEN ++ T( J, J ) = CZERO + * + * Test 1a: Check for 2 consecutive small subdiagonals in A + * + ILAZR2 = .FALSE. + IF( .NOT.ILAZRO ) THEN +- IF( ABS1( A( J, J-1 ) )*( ASCALE*ABS1( A( J+1, +- $ J ) ) ).LE.ABS1( A( J, J ) )*( ASCALE*ATOL ) ) ++ IF( ABS1( H( J, J-1 ) )*( ASCALE*ABS1( H( J+1, ++ $ J ) ) ).LE.ABS1( H( J, J ) )*( ASCALE*ATOL ) ) + $ ILAZR2 = .TRUE. + END IF + * +@@ -423,21 +448,21 @@ + * + IF( ILAZRO .OR. 
ILAZR2 ) THEN + DO 20 JCH = J, ILAST - 1 +- CTEMP = A( JCH, JCH ) +- CALL CLARTG( CTEMP, A( JCH+1, JCH ), C, S, +- $ A( JCH, JCH ) ) +- A( JCH+1, JCH ) = CZERO +- CALL CROT( ILASTM-JCH, A( JCH, JCH+1 ), LDA, +- $ A( JCH+1, JCH+1 ), LDA, C, S ) +- CALL CROT( ILASTM-JCH, B( JCH, JCH+1 ), LDB, +- $ B( JCH+1, JCH+1 ), LDB, C, S ) ++ CTEMP = H( JCH, JCH ) ++ CALL CLARTG( CTEMP, H( JCH+1, JCH ), C, S, ++ $ H( JCH, JCH ) ) ++ H( JCH+1, JCH ) = CZERO ++ CALL CROT( ILASTM-JCH, H( JCH, JCH+1 ), LDH, ++ $ H( JCH+1, JCH+1 ), LDH, C, S ) ++ CALL CROT( ILASTM-JCH, T( JCH, JCH+1 ), LDT, ++ $ T( JCH+1, JCH+1 ), LDT, C, S ) + IF( ILQ ) + $ CALL CROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, CONJG( S ) ) + IF( ILAZR2 ) +- $ A( JCH, JCH-1 ) = A( JCH, JCH-1 )*C ++ $ H( JCH, JCH-1 ) = H( JCH, JCH-1 )*C + ILAZR2 = .FALSE. +- IF( ABS1( B( JCH+1, JCH+1 ) ).GE.BTOL ) THEN ++ IF( ABS1( T( JCH+1, JCH+1 ) ).GE.BTOL ) THEN + IF( JCH+1.GE.ILAST ) THEN + GO TO 60 + ELSE +@@ -445,35 +470,35 @@ + GO TO 70 + END IF + END IF +- B( JCH+1, JCH+1 ) = CZERO ++ T( JCH+1, JCH+1 ) = CZERO + 20 CONTINUE + GO TO 50 + ELSE + * +-* Only test 2 passed -- chase the zero to B(ILAST,ILAST) +-* Then process as in the case B(ILAST,ILAST)=0 ++* Only test 2 passed -- chase the zero to T(ILAST,ILAST) ++* Then process as in the case T(ILAST,ILAST)=0 + * + DO 30 JCH = J, ILAST - 1 +- CTEMP = B( JCH, JCH+1 ) +- CALL CLARTG( CTEMP, B( JCH+1, JCH+1 ), C, S, +- $ B( JCH, JCH+1 ) ) +- B( JCH+1, JCH+1 ) = CZERO ++ CTEMP = T( JCH, JCH+1 ) ++ CALL CLARTG( CTEMP, T( JCH+1, JCH+1 ), C, S, ++ $ T( JCH, JCH+1 ) ) ++ T( JCH+1, JCH+1 ) = CZERO + IF( JCH.LT.ILASTM-1 ) +- $ CALL CROT( ILASTM-JCH-1, B( JCH, JCH+2 ), LDB, +- $ B( JCH+1, JCH+2 ), LDB, C, S ) +- CALL CROT( ILASTM-JCH+2, A( JCH, JCH-1 ), LDA, +- $ A( JCH+1, JCH-1 ), LDA, C, S ) ++ $ CALL CROT( ILASTM-JCH-1, T( JCH, JCH+2 ), LDT, ++ $ T( JCH+1, JCH+2 ), LDT, C, S ) ++ CALL CROT( ILASTM-JCH+2, H( JCH, JCH-1 ), LDH, ++ $ H( JCH+1, JCH-1 ), LDH, C, S ) + IF( ILQ ) + $ CALL 
CROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, CONJG( S ) ) +- CTEMP = A( JCH+1, JCH ) +- CALL CLARTG( CTEMP, A( JCH+1, JCH-1 ), C, S, +- $ A( JCH+1, JCH ) ) +- A( JCH+1, JCH-1 ) = CZERO +- CALL CROT( JCH+1-IFRSTM, A( IFRSTM, JCH ), 1, +- $ A( IFRSTM, JCH-1 ), 1, C, S ) +- CALL CROT( JCH-IFRSTM, B( IFRSTM, JCH ), 1, +- $ B( IFRSTM, JCH-1 ), 1, C, S ) ++ CTEMP = H( JCH+1, JCH ) ++ CALL CLARTG( CTEMP, H( JCH+1, JCH-1 ), C, S, ++ $ H( JCH+1, JCH ) ) ++ H( JCH+1, JCH-1 ) = CZERO ++ CALL CROT( JCH+1-IFRSTM, H( IFRSTM, JCH ), 1, ++ $ H( IFRSTM, JCH-1 ), 1, C, S ) ++ CALL CROT( JCH-IFRSTM, T( IFRSTM, JCH ), 1, ++ $ T( IFRSTM, JCH-1 ), 1, C, S ) + IF( ILZ ) + $ CALL CROT( N, Z( 1, JCH ), 1, Z( 1, JCH-1 ), 1, + $ C, S ) +@@ -497,42 +522,42 @@ + INFO = 2*N + 1 + GO TO 210 + * +-* B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a ++* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a + * 1x1 block. + * + 50 CONTINUE +- CTEMP = A( ILAST, ILAST ) +- CALL CLARTG( CTEMP, A( ILAST, ILAST-1 ), C, S, +- $ A( ILAST, ILAST ) ) +- A( ILAST, ILAST-1 ) = CZERO +- CALL CROT( ILAST-IFRSTM, A( IFRSTM, ILAST ), 1, +- $ A( IFRSTM, ILAST-1 ), 1, C, S ) +- CALL CROT( ILAST-IFRSTM, B( IFRSTM, ILAST ), 1, +- $ B( IFRSTM, ILAST-1 ), 1, C, S ) ++ CTEMP = H( ILAST, ILAST ) ++ CALL CLARTG( CTEMP, H( ILAST, ILAST-1 ), C, S, ++ $ H( ILAST, ILAST ) ) ++ H( ILAST, ILAST-1 ) = CZERO ++ CALL CROT( ILAST-IFRSTM, H( IFRSTM, ILAST ), 1, ++ $ H( IFRSTM, ILAST-1 ), 1, C, S ) ++ CALL CROT( ILAST-IFRSTM, T( IFRSTM, ILAST ), 1, ++ $ T( IFRSTM, ILAST-1 ), 1, C, S ) + IF( ILZ ) + $ CALL CROT( N, Z( 1, ILAST ), 1, Z( 1, ILAST-1 ), 1, C, S ) + * +-* A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA ++* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA + * + 60 CONTINUE +- ABSB = ABS( B( ILAST, ILAST ) ) ++ ABSB = ABS( T( ILAST, ILAST ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = CONJG( B( ILAST, ILAST ) / ABSB ) +- B( ILAST, ILAST ) = ABSB ++ SIGNBC = CONJG( T( ILAST, ILAST ) / ABSB ) ++ T( 
ILAST, ILAST ) = ABSB + IF( ILSCHR ) THEN +- CALL CSCAL( ILAST-IFRSTM, SIGNBC, B( IFRSTM, ILAST ), 1 ) +- CALL CSCAL( ILAST+1-IFRSTM, SIGNBC, A( IFRSTM, ILAST ), ++ CALL CSCAL( ILAST-IFRSTM, SIGNBC, T( IFRSTM, ILAST ), 1 ) ++ CALL CSCAL( ILAST+1-IFRSTM, SIGNBC, H( IFRSTM, ILAST ), + $ 1 ) + ELSE +- A( ILAST, ILAST ) = A( ILAST, ILAST )*SIGNBC ++ H( ILAST, ILAST ) = H( ILAST, ILAST )*SIGNBC + END IF + IF( ILZ ) + $ CALL CSCAL( N, SIGNBC, Z( 1, ILAST ), 1 ) + ELSE +- B( ILAST, ILAST ) = CZERO ++ T( ILAST, ILAST ) = CZERO + END IF +- ALPHA( ILAST ) = A( ILAST, ILAST ) +- BETA( ILAST ) = B( ILAST, ILAST ) ++ ALPHA( ILAST ) = H( ILAST, ILAST ) ++ BETA( ILAST ) = T( ILAST, ILAST ) + * + * Go to next block -- exit if finished. + * +@@ -565,7 +590,7 @@ + * Compute the Shift. + * + * At this point, IFIRST < ILAST, and the diagonal elements of +-* B(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in ++* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in + * magnitude) + * + IF( ( IITER / 10 )*10.NE.IITER ) THEN +@@ -577,33 +602,33 @@ + * We factor B as U*D, where U has unit diagonals, and + * compute (A*inv(D))*inv(U). 
+ * +- U12 = ( BSCALE*B( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- AD11 = ( ASCALE*A( ILAST-1, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD21 = ( ASCALE*A( ILAST, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD12 = ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- AD22 = ( ASCALE*A( ILAST, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) ++ U12 = ( BSCALE*T( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD11 = ( ASCALE*H( ILAST-1, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD21 = ( ASCALE*H( ILAST, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD12 = ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD22 = ( ASCALE*H( ILAST, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) + ABI22 = AD22 - U12*AD21 + * +- T = HALF*( AD11+ABI22 ) +- RTDISC = SQRT( T**2+AD12*AD21-AD11*AD22 ) +- TEMP = REAL( T-ABI22 )*REAL( RTDISC ) + +- $ AIMAG( T-ABI22 )*AIMAG( RTDISC ) ++ T1 = HALF*( AD11+ABI22 ) ++ RTDISC = SQRT( T1**2+AD12*AD21-AD11*AD22 ) ++ TEMP = REAL( T1-ABI22 )*REAL( RTDISC ) + ++ $ AIMAG( T1-ABI22 )*AIMAG( RTDISC ) + IF( TEMP.LE.ZERO ) THEN +- SHIFT = T + RTDISC ++ SHIFT = T1 + RTDISC + ELSE +- SHIFT = T - RTDISC ++ SHIFT = T1 - RTDISC + END IF + ELSE + * + * Exceptional shift. Chosen for no particularly good reason. + * +- ESHIFT = ESHIFT + CONJG( ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) ) ++ ESHIFT = ESHIFT + CONJG( ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ) + SHIFT = ESHIFT + END IF + * +@@ -611,46 +636,46 @@ + * + DO 80 J = ILAST - 1, IFIRST + 1, -1 + ISTART = J +- CTEMP = ASCALE*A( J, J ) - SHIFT*( BSCALE*B( J, J ) ) ++ CTEMP = ASCALE*H( J, J ) - SHIFT*( BSCALE*T( J, J ) ) + TEMP = ABS1( CTEMP ) +- TEMP2 = ASCALE*ABS1( A( J+1, J ) ) ++ TEMP2 = ASCALE*ABS1( H( J+1, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. 
TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( ABS1( A( J, J-1 ) )*TEMP2.LE.TEMP*ATOL ) ++ IF( ABS1( H( J, J-1 ) )*TEMP2.LE.TEMP*ATOL ) + $ GO TO 90 + 80 CONTINUE + * + ISTART = IFIRST +- CTEMP = ASCALE*A( IFIRST, IFIRST ) - +- $ SHIFT*( BSCALE*B( IFIRST, IFIRST ) ) ++ CTEMP = ASCALE*H( IFIRST, IFIRST ) - ++ $ SHIFT*( BSCALE*T( IFIRST, IFIRST ) ) + 90 CONTINUE + * + * Do an implicit-shift QZ sweep. + * + * Initial Q + * +- CTEMP2 = ASCALE*A( ISTART+1, ISTART ) ++ CTEMP2 = ASCALE*H( ISTART+1, ISTART ) + CALL CLARTG( CTEMP, CTEMP2, C, S, CTEMP3 ) + * + * Sweep + * + DO 150 J = ISTART, ILAST - 1 + IF( J.GT.ISTART ) THEN +- CTEMP = A( J, J-1 ) +- CALL CLARTG( CTEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = CZERO ++ CTEMP = H( J, J-1 ) ++ CALL CLARTG( CTEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = CZERO + END IF + * + DO 100 JC = J, ILASTM +- CTEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -CONJG( S )*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = CTEMP +- CTEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, JC ) = -CONJG( S )*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = CTEMP2 ++ CTEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -CONJG( S )*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = CTEMP ++ CTEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -CONJG( S )*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = CTEMP2 + 100 CONTINUE + IF( ILQ ) THEN + DO 110 JR = 1, N +@@ -660,19 +685,19 @@ + 110 CONTINUE + END IF + * +- CTEMP = B( J+1, J+1 ) +- CALL CLARTG( CTEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = CZERO ++ CTEMP = T( J+1, J+1 ) ++ CALL CLARTG( CTEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = CZERO + * + DO 120 JR = IFRSTM, MIN( J+2, ILAST ) +- CTEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -CONJG( S )*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = CTEMP ++ CTEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -CONJG( S )*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = CTEMP + 
120 CONTINUE + DO 130 JR = IFRSTM, J +- CTEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -CONJG( S )*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = CTEMP ++ CTEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -CONJG( S )*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = CTEMP + 130 CONTINUE + IF( ILZ ) THEN + DO 140 JR = 1, N +@@ -700,23 +725,23 @@ + * Set Eigenvalues 1:ILO-1 + * + DO 200 J = 1, ILO - 1 +- ABSB = ABS( B( J, J ) ) ++ ABSB = ABS( T( J, J ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = CONJG( B( J, J ) / ABSB ) +- B( J, J ) = ABSB ++ SIGNBC = CONJG( T( J, J ) / ABSB ) ++ T( J, J ) = ABSB + IF( ILSCHR ) THEN +- CALL CSCAL( J-1, SIGNBC, B( 1, J ), 1 ) +- CALL CSCAL( J, SIGNBC, A( 1, J ), 1 ) ++ CALL CSCAL( J-1, SIGNBC, T( 1, J ), 1 ) ++ CALL CSCAL( J, SIGNBC, H( 1, J ), 1 ) + ELSE +- A( J, J ) = A( J, J )*SIGNBC ++ H( J, J ) = H( J, J )*SIGNBC + END IF + IF( ILZ ) + $ CALL CSCAL( N, SIGNBC, Z( 1, J ), 1 ) + ELSE +- B( J, J ) = CZERO ++ T( J, J ) = CZERO + END IF +- ALPHA( J ) = A( J, J ) +- BETA( J ) = B( J, J ) ++ ALPHA( J ) = H( J, J ) ++ BETA( J ) = T( J, J ) + 200 CONTINUE + * + * Normal Termination +diff -uNr LAPACK.orig/SRC/clasr.f LAPACK/SRC/clasr.f +--- LAPACK.orig/SRC/clasr.f Thu Nov 4 14:24:17 1999 ++++ LAPACK/SRC/clasr.f Fri May 25 16:12:37 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK auxiliary routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1992 ++* May 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER DIRECT, PIVOT, SIDE +@@ -17,42 +17,77 @@ + * Purpose + * ======= + * +-* CLASR performs the transformation ++* CLASR applies a sequence of real plane rotations to a complex matrix ++* A, from either the left or the right. 
+ * +-* A := P*A, when SIDE = 'L' or 'l' ( Left-hand side ) ++* When SIDE = 'L', the transformation takes the form + * +-* A := A*P', when SIDE = 'R' or 'r' ( Right-hand side ) ++* A := P*A + * +-* where A is an m by n complex matrix and P is an orthogonal matrix, +-* consisting of a sequence of plane rotations determined by the +-* parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l' +-* and z = n when SIDE = 'R' or 'r' ): ++* and when SIDE = 'R', the transformation takes the form + * +-* When DIRECT = 'F' or 'f' ( Forward sequence ) then +-* +-* P = P( z - 1 )*...*P( 2 )*P( 1 ), +-* +-* and when DIRECT = 'B' or 'b' ( Backward sequence ) then +-* +-* P = P( 1 )*P( 2 )*...*P( z - 1 ), +-* +-* where P( k ) is a plane rotation matrix for the following planes: +-* +-* when PIVOT = 'V' or 'v' ( Variable pivot ), +-* the plane ( k, k + 1 ) +-* +-* when PIVOT = 'T' or 't' ( Top pivot ), +-* the plane ( 1, k + 1 ) +-* +-* when PIVOT = 'B' or 'b' ( Bottom pivot ), +-* the plane ( k, z ) +-* +-* c( k ) and s( k ) must contain the cosine and sine that define the +-* matrix P( k ). The two by two plane rotation part of the matrix +-* P( k ), R( k ), is assumed to be of the form +-* +-* R( k ) = ( c( k ) s( k ) ). +-* ( -s( k ) c( k ) ) ++* A := A*P**T ++* ++* where P is an orthogonal matrix consisting of a sequence of z plane ++* rotations, with z = M when SIDE = 'L' and z = N when SIDE = 'R', ++* and P**T is the transpose of P. ++* ++* When DIRECT = 'F' (Forward sequence), then ++* ++* P = P(z-1) * ... * P(2) * P(1) ++* ++* and when DIRECT = 'B' (Backward sequence), then ++* ++* P = P(1) * P(2) * ... * P(z-1) ++* ++* where P(k) is a plane rotation matrix defined by the 2-by-2 rotation ++* ++* R(k) = ( c(k) s(k) ) ++* = ( -s(k) c(k) ). ++* ++* When PIVOT = 'V' (Variable pivot), the rotation is performed ++* for the plane (k,k+1), i.e., P(k) has the form ++* ++* P(k) = ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( c(k) s(k) ) ++* ( -s(k) c(k) ) ++* ( 1 ) ++* ( ... 
) ++* ( 1 ) ++* ++* where R(k) appears as a rank-2 modification to the identity matrix in ++* rows and columns k and k+1. ++* ++* When PIVOT = 'T' (Top pivot), the rotation is performed for the ++* plane (1,k+1), so P(k) has the form ++* ++* P(k) = ( c(k) s(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( -s(k) c(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ++* where R(k) appears in rows and columns 1 and k+1. ++* ++* Similarly, when PIVOT = 'B' (Bottom pivot), the rotation is ++* performed for the plane (k,z), giving P(k) the form ++* ++* P(k) = ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( c(k) s(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( -s(k) c(k) ) ++* ++* where R(k) appears in rows and columns k and z. The rotations are ++* performed without ever forming P(k) explicitly. + * + * Arguments + * ========= +@@ -61,13 +96,13 @@ + * Specifies whether the plane rotation matrix P is applied to + * A on the left or the right. + * = 'L': Left, compute A := P*A +-* = 'R': Right, compute A:= A*P' ++* = 'R': Right, compute A:= A*P**T + * + * DIRECT (input) CHARACTER*1 + * Specifies whether P is a forward or backward sequence of + * plane rotations. +-* = 'F': Forward, P = P( z - 1 )*...*P( 2 )*P( 1 ) +-* = 'B': Backward, P = P( 1 )*P( 2 )*...*P( z - 1 ) ++* = 'F': Forward, P = P(z-1)*...*P(2)*P(1) ++* = 'B': Backward, P = P(1)*P(2)*...*P(z-1) + * + * PIVOT (input) CHARACTER*1 + * Specifies the plane for which P(k) is a plane rotation +@@ -84,18 +119,22 @@ + * The number of columns of the matrix A. If n <= 1, an + * immediate return is effected. + * +-* C, S (input) REAL arrays, dimension ++* C (input) REAL array, dimension ++* (M-1) if SIDE = 'L' ++* (N-1) if SIDE = 'R' ++* The cosines c(k) of the plane rotations. ++* ++* S (input) REAL array, dimension + * (M-1) if SIDE = 'L' + * (N-1) if SIDE = 'R' +-* c(k) and s(k) contain the cosine and sine that define the +-* matrix P(k). 
The two by two plane rotation part of the +-* matrix P(k), R(k), is assumed to be of the form +-* R( k ) = ( c( k ) s( k ) ). +-* ( -s( k ) c( k ) ) ++* The sines s(k) of the plane rotations. The 2-by-2 plane ++* rotation part of the matrix P(k), R(k), has the form ++* R(k) = ( c(k) s(k) ) ++* ( -s(k) c(k) ). + * + * A (input/output) COMPLEX array, dimension (LDA,N) +-* The m by n matrix A. On exit, A is overwritten by P*A if +-* SIDE = 'R' or by A*P' if SIDE = 'L'. ++* The M-by-N matrix A. On exit, A is overwritten by P*A if ++* SIDE = 'R' or by A*P**T if SIDE = 'L'. + * + * LDA (input) INTEGER + * The leading dimension of the array A. LDA >= max(1,M). +diff -uNr LAPACK.orig/SRC/ctgevc.f LAPACK/SRC/ctgevc.f +--- LAPACK.orig/SRC/ctgevc.f Thu Nov 4 14:26:09 1999 ++++ LAPACK/SRC/ctgevc.f Fri May 25 16:13:37 2001 +@@ -1,19 +1,19 @@ +- SUBROUTINE CTGEVC( SIDE, HOWMNY, SELECT, N, A, LDA, B, LDB, VL, ++ SUBROUTINE CTGEVC( SIDE, HOWMNY, SELECT, N, S, LDS, P, LDP, VL, + $ LDVL, VR, LDVR, MM, M, WORK, RWORK, INFO ) + * + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 4, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +- INTEGER INFO, LDA, LDB, LDVL, LDVR, M, MM, N ++ INTEGER INFO, LDP, LDS, LDVL, LDVR, M, MM, N + * .. + * .. Array Arguments .. + LOGICAL SELECT( * ) + REAL RWORK( * ) +- COMPLEX A( LDA, * ), B( LDB, * ), VL( LDVL, * ), ++ COMPLEX P( LDP, * ), S( LDS, * ), VL( LDVL, * ), + $ VR( LDVR, * ), WORK( * ) + * .. + * +@@ -21,28 +21,30 @@ + * Purpose + * ======= + * +-* CTGEVC computes some or all of the right and/or left generalized +-* eigenvectors of a pair of complex upper triangular matrices (A,B). 
+-* +-* The right generalized eigenvector x and the left generalized +-* eigenvector y of (A,B) corresponding to a generalized eigenvalue +-* w are defined by: +-* +-* (A - wB) * x = 0 and y**H * (A - wB) = 0 +-* ++* CTGEVC computes some or all of the right and/or left eigenvectors of ++* a pair of complex matrices (S,P), where S and P are upper triangular. ++* Matrix pairs of this type are produced by the generalized Schur ++* factorization of a complex matrix pair (A,B): ++* ++* A = Q*S*Z**H, B = Q*P*Z**H ++* ++* as computed by CGGHRD + CHGEQZ. ++* ++* The right eigenvector x and the left eigenvector y of (S,P) ++* corresponding to an eigenvalue w are defined by: ++* ++* S*x = w*P*x, (y**H)*S = w*(y**H)*P, ++* + * where y**H denotes the conjugate tranpose of y. +-* +-* If an eigenvalue w is determined by zero diagonal elements of both A +-* and B, a unit vector is returned as the corresponding eigenvector. +-* +-* If all eigenvectors are requested, the routine may either return +-* the matrices X and/or Y of right or left eigenvectors of (A,B), or +-* the products Z*X and/or Q*Y, where Z and Q are input unitary +-* matrices. If (A,B) was obtained from the generalized Schur +-* factorization of an original pair of matrices +-* (A0,B0) = (Q*A*Z**H,Q*B*Z**H), +-* then Z*X and Q*Y are the matrices of right or left eigenvectors of +-* A. ++* The eigenvalues are not input to this routine, but are computed ++* directly from the diagonal elements of S and P. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of (S,P), or the products Z*X and/or Q*Y, ++* where Z and Q are input matrices. ++* If Q and Z are the unitary factors from the generalized Schur ++* factorization of a matrix pair (A,B), then Z*X and Q*Y ++* are the matrices of right and left eigenvectors of (A,B). 
+ * + * Arguments + * ========= +@@ -54,66 +56,66 @@ + * + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; +-* = 'B': compute all right and/or left eigenvectors, and +-* backtransform them using the input matrices supplied +-* in VR and/or VL; ++* = 'B': compute all right and/or left eigenvectors, ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, + * specified by the logical array SELECT. + * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY='S', SELECT specifies the eigenvectors to be +-* computed. +-* If HOWMNY='A' or 'B', SELECT is not referenced. +-* To select the eigenvector corresponding to the j-th +-* eigenvalue, SELECT(j) must be set to .TRUE.. ++* computed. The eigenvector corresponding to the j-th ++* eigenvalue is computed if SELECT(j) = .TRUE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER +-* The order of the matrices A and B. N >= 0. +-* +-* A (input) COMPLEX array, dimension (LDA,N) +-* The upper triangular matrix A. +-* +-* LDA (input) INTEGER +-* The leading dimension of array A. LDA >= max(1,N). ++* The order of the matrices S and P. N >= 0. + * +-* B (input) COMPLEX array, dimension (LDB,N) +-* The upper triangular matrix B. B must have real diagonal +-* elements. ++* S (input) COMPLEX array, dimension (LDS,N) ++* The upper triangular matrix S from a generalized Schur ++* factorization, as computed by CHGEQZ. ++* ++* LDS (input) INTEGER ++* The leading dimension of array S. LDS >= max(1,N). ++* ++* P (input) COMPLEX array, dimension (LDP,N) ++* The upper triangular matrix P from a generalized Schur ++* factorization, as computed by CHGEQZ. P must have real ++* diagonal elements. + * +-* LDB (input) INTEGER +-* The leading dimension of array B. LDB >= max(1,N). ++* LDP (input) INTEGER ++* The leading dimension of array P. LDP >= max(1,N). 
+ * + * VL (input/output) COMPLEX array, dimension (LDVL,MM) + * On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must + * contain an N-by-N matrix Q (usually the unitary matrix Q + * of left Schur vectors returned by CHGEQZ). + * On exit, if SIDE = 'L' or 'B', VL contains: +-* if HOWMNY = 'A', the matrix Y of left eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Q*Y; +-* if HOWMNY = 'S', the left eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the left eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VL, in the same order as their eigenvalues. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of array VL. +-* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'l' or 'B' or 'b', LDVL >= N. + * + * VR (input/output) COMPLEX array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must + * contain an N-by-N matrix Q (usually the unitary matrix Z + * of right Schur vectors returned by CHGEQZ). + * On exit, if SIDE = 'R' or 'B', VR contains: +-* if HOWMNY = 'A', the matrix X of right eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Z*X; +-* if HOWMNY = 'S', the right eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the right eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VR, in the same order as their eigenvalues. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. + * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. +-* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. 
+ * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +@@ -180,7 +182,7 @@ + IHWMNY = 2 + ILALL = .FALSE. + ILBACK = .FALSE. +- ELSE IF( LSAME( HOWMNY, 'B' ) .OR. LSAME( HOWMNY, 'T' ) ) THEN ++ ELSE IF( LSAME( HOWMNY, 'B' ) ) THEN + IHWMNY = 3 + ILALL = .TRUE. + ILBACK = .TRUE. +@@ -211,9 +213,9 @@ + INFO = -2 + ELSE IF( N.LT.0 ) THEN + INFO = -4 +- ELSE IF( LDA.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDS.LT.MAX( 1, N ) ) THEN + INFO = -6 +- ELSE IF( LDB.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDP.LT.MAX( 1, N ) ) THEN + INFO = -8 + END IF + IF( INFO.NE.0 ) THEN +@@ -237,7 +239,7 @@ + * + ILBBAD = .FALSE. + DO 20 J = 1, N +- IF( AIMAG( B( J, J ) ).NE.ZERO ) ++ IF( AIMAG( P( J, J ) ).NE.ZERO ) + $ ILBBAD = .TRUE. + 20 CONTINUE + * +@@ -275,19 +277,19 @@ + * part of A and B to check for possible overflow in the triangular + * solver. + * +- ANORM = ABS1( A( 1, 1 ) ) +- BNORM = ABS1( B( 1, 1 ) ) ++ ANORM = ABS1( S( 1, 1 ) ) ++ BNORM = ABS1( P( 1, 1 ) ) + RWORK( 1 ) = ZERO + RWORK( N+1 ) = ZERO + DO 40 J = 2, N + RWORK( J ) = ZERO + RWORK( N+J ) = ZERO + DO 30 I = 1, J - 1 +- RWORK( J ) = RWORK( J ) + ABS1( A( I, J ) ) +- RWORK( N+J ) = RWORK( N+J ) + ABS1( B( I, J ) ) ++ RWORK( J ) = RWORK( J ) + ABS1( S( I, J ) ) ++ RWORK( N+J ) = RWORK( N+J ) + ABS1( P( I, J ) ) + 30 CONTINUE +- ANORM = MAX( ANORM, RWORK( J )+ABS1( A( J, J ) ) ) +- BNORM = MAX( BNORM, RWORK( N+J )+ABS1( B( J, J ) ) ) ++ ANORM = MAX( ANORM, RWORK( J )+ABS1( S( J, J ) ) ) ++ BNORM = MAX( BNORM, RWORK( N+J )+ABS1( P( J, J ) ) ) + 40 CONTINUE + * + ASCALE = ONE / MAX( ANORM, SAFMIN ) +@@ -309,8 +311,8 @@ + IF( ILCOMP ) THEN + IEIG = IEIG + 1 + * +- IF( ABS1( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( REAL( B( JE, JE ) ) ).LE.SAFMIN ) THEN ++ IF( ABS1( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( REAL( P( JE, JE ) ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- return unit eigenvector + * +@@ -326,10 +328,10 @@ + * H + * y ( a A - b B ) = 0 + * +- TEMP = ONE / MAX( ABS1( A( JE, JE ) )*ASCALE, +- $ ABS( REAL( B( JE, JE ) ) )*BSCALE, SAFMIN ) +- SALPHA = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*REAL( B( JE, JE ) ) )*BSCALE ++ TEMP = ONE / MAX( ABS1( S( JE, JE ) )*ASCALE, ++ $ ABS( REAL( P( JE, JE ) ) )*BSCALE, SAFMIN ) ++ SALPHA = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*REAL( P( JE, JE ) ) )*BSCALE + ACOEFF = SBETA*ASCALE + BCOEFF = SALPHA*BSCALE + * +@@ -380,7 +382,7 @@ + * + * Compute + * j-1 +-* SUM = sum conjg( a*A(k,j) - b*B(k,j) )*x(k) ++* SUM = sum conjg( a*S(k,j) - b*P(k,j) )*x(k) + * k=je + * (Scale if necessary) + * +@@ -396,16 +398,16 @@ + SUMB = CZERO + * + DO 80 JR = JE, J - 1 +- SUMA = SUMA + CONJG( A( JR, J ) )*WORK( JR ) +- SUMB = SUMB + CONJG( B( JR, J ) )*WORK( JR ) ++ SUMA = SUMA + CONJG( S( JR, J ) )*WORK( JR ) ++ SUMB = SUMB + CONJG( P( JR, J ) )*WORK( JR ) + 80 CONTINUE + SUM = ACOEFF*SUMA - CONJG( BCOEFF )*SUMB + * +-* Form x(j) = - SUM / conjg( a*A(j,j) - b*B(j,j) ) ++* Form x(j) = - SUM / conjg( a*S(j,j) - b*P(j,j) ) + * + * with scaling and perturbation of the denominator + * +- D = CONJG( ACOEFF*A( J, J )-BCOEFF*B( J, J ) ) ++ D = CONJG( ACOEFF*S( J, J )-BCOEFF*P( J, J ) ) + IF( ABS1( D ).LE.DMIN ) + $ D = CMPLX( DMIN ) + * +@@ -475,8 +477,8 @@ + IF( ILCOMP ) THEN + IEIG = IEIG - 1 + * +- IF( ABS1( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( REAL( B( JE, JE ) ) ).LE.SAFMIN ) THEN ++ IF( ABS1( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( REAL( P( JE, JE ) ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- return unit eigenvector + * +@@ -492,10 +494,10 @@ + * + * ( a A - b B ) x = 0 + * +- TEMP = ONE / MAX( ABS1( A( JE, JE ) )*ASCALE, +- $ ABS( REAL( B( JE, JE ) ) )*BSCALE, SAFMIN ) +- SALPHA = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*REAL( B( JE, JE ) ) )*BSCALE ++ TEMP = ONE / MAX( ABS1( S( JE, JE ) )*ASCALE, ++ $ ABS( REAL( P( JE, JE ) ) )*BSCALE, SAFMIN ) ++ SALPHA = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*REAL( P( JE, JE ) ) )*BSCALE + ACOEFF = SBETA*ASCALE + BCOEFF = SALPHA*BSCALE + * +@@ -542,7 +544,7 @@ + * WORK(j+1:JE) contains x + * + DO 170 JR = 1, JE - 1 +- WORK( JR ) = ACOEFF*A( JR, JE ) - BCOEFF*B( JR, JE ) ++ WORK( JR ) = ACOEFF*S( JR, JE ) - BCOEFF*P( JR, JE ) + 170 CONTINUE + WORK( JE ) = CONE + * +@@ -551,7 +553,7 @@ + * Form x(j) := - w(j) / d + * with scaling and perturbation of the denominator + * +- D = ACOEFF*A( J, J ) - BCOEFF*B( J, J ) ++ D = ACOEFF*S( J, J ) - BCOEFF*P( J, J ) + IF( ABS1( D ).LE.DMIN ) + $ D = CMPLX( DMIN ) + * +@@ -568,7 +570,7 @@ + * + IF( J.GT.1 ) THEN + * +-* w = w + x(j)*(a A(*,j) - b B(*,j) ) with scaling ++* w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling + * + IF( ABS1( WORK( J ) ).GT.ONE ) THEN + TEMP = ONE / ABS1( WORK( J ) ) +@@ -583,8 +585,8 @@ + CA = ACOEFF*WORK( J ) + CB = BCOEFF*WORK( J ) + DO 200 JR = 1, J - 1 +- WORK( JR ) = WORK( JR ) + CA*A( JR, J ) - +- $ CB*B( JR, J ) ++ WORK( JR ) = WORK( JR ) + CA*S( JR, J ) - ++ $ CB*P( JR, J ) + 200 CONTINUE + END IF + 210 CONTINUE +diff -uNr LAPACK.orig/SRC/ctrevc.f LAPACK/SRC/ctrevc.f +--- LAPACK.orig/SRC/ctrevc.f Thu Nov 4 14:24:23 1999 ++++ LAPACK/SRC/ctrevc.f Fri May 25 16:13:56 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 7, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER HOWMNY, SIDE +@@ -22,20 +22,23 @@ + * + * CTREVC computes some or all of the right and/or left eigenvectors of + * a complex upper triangular matrix T. +-* ++* Matrices of this type are produced by the Schur factorization of ++* a complex general matrix: A = Q*T*Q**H, as computed by CHSEQR. ++* + * The right eigenvector x and the left eigenvector y of T corresponding + * to an eigenvalue w are defined by: +-* +-* T*x = w*x, y'*T = w*y' +-* +-* where y' denotes the conjugate transpose of the vector y. +-* +-* If all eigenvectors are requested, the routine may either return the +-* matrices X and/or Y of right or left eigenvectors of T, or the +-* products Q*X and/or Q*Y, where Q is an input unitary +-* matrix. If T was obtained from the Schur factorization of an +-* original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of +-* right or left eigenvectors of A. ++* ++* T*x = w*x, (y**H)*T = w*(y**H) ++* ++* where y**H denotes the conjugate transpose of the vector y. ++* The eigenvalues are not input to this routine, but are read directly ++* from the diagonal of T. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an ++* input matrix. If Q is the unitary factor that reduces a matrix A to ++* Schur form T, then Q*X and Q*Y are the matrices of right and left ++* eigenvectors of A. + * + * Arguments + * ========= +@@ -48,17 +51,17 @@ + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; + * = 'B': compute all right and/or left eigenvectors, +-* and backtransform them using the input matrices +-* supplied in VR and/or VL; ++* backtransformed using the matrices supplied in ++* VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, +-* specified by the logical array SELECT. ++* as indicated by the logical array SELECT. 
+ * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY = 'S', SELECT specifies the eigenvectors to be + * computed. +-* If HOWMNY = 'A' or 'B', SELECT is not referenced. +-* To select the eigenvector corresponding to the j-th +-* eigenvalue, SELECT(j) must be set to .TRUE.. ++* The eigenvector corresponding to the j-th eigenvalue is ++* computed if SELECT(j) = .TRUE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER + * The order of the matrix T. N >= 0. +@@ -76,19 +79,16 @@ + * Schur vectors returned by CHSEQR). + * On exit, if SIDE = 'L' or 'B', VL contains: + * if HOWMNY = 'A', the matrix Y of left eigenvectors of T; +-* VL is lower triangular. The i-th column +-* VL(i) of VL is the eigenvector corresponding +-* to T(i,i). + * if HOWMNY = 'B', the matrix Q*Y; + * if HOWMNY = 'S', the left eigenvectors of T specified by + * SELECT, stored consecutively in the columns + * of VL, in the same order as their + * eigenvalues. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of the array VL. LDVL >= max(1,N) if +-* SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of the array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. + * + * VR (input/output) COMPLEX array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +@@ -96,19 +96,16 @@ + * Schur vectors returned by CHSEQR). + * On exit, if SIDE = 'R' or 'B', VR contains: + * if HOWMNY = 'A', the matrix X of right eigenvectors of T; +-* VR is upper triangular. The i-th column +-* VR(i) of VR is the eigenvector corresponding +-* to T(i,i). + * if HOWMNY = 'B', the matrix Q*X; + * if HOWMNY = 'S', the right eigenvectors of T specified by + * SELECT, stored consecutively in the columns + * of VR, in the same order as their + * eigenvalues. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. 
+ * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. LDVR >= max(1,N) if +-* SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B'; LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +diff -uNr LAPACK.orig/SRC/ctrsen.f LAPACK/SRC/ctrsen.f +--- LAPACK.orig/SRC/ctrsen.f Thu Nov 4 14:24:24 1999 ++++ LAPACK/SRC/ctrsen.f Fri May 25 16:14:15 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* January 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, JOB +@@ -93,14 +93,13 @@ + * If JOB = 'N' or 'E', SEP is not referenced. + * + * WORK (workspace/output) COMPLEX array, dimension (LWORK) +-* If JOB = 'N', WORK is not referenced. Otherwise, +-* on exit, if INFO = 0, WORK(1) returns the optimal LWORK. ++* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. + * + * LWORK (input) INTEGER + * The dimension of the array WORK. + * If JOB = 'N', LWORK >= 1; +-* if JOB = 'E', LWORK = M*(N-M); +-* if JOB = 'V' or 'B', LWORK >= 2*M*(N-M). ++* if JOB = 'E', LWORK = max(1,M*(N-M)); ++* if JOB = 'V' or 'B', LWORK >= max(1,2*M*(N-M)). + * + * If LWORK = -1, then a workspace query is assumed; the routine + * only calculates the optimal size of the WORK array, returns +diff -uNr LAPACK.orig/SRC/ctrsyl.f LAPACK/SRC/ctrsyl.f +--- LAPACK.orig/SRC/ctrsyl.f Thu Nov 4 14:24:24 1999 ++++ LAPACK/SRC/ctrsyl.f Fri May 25 16:14:25 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* January 9, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER TRANA, TRANB +@@ -119,11 +119,9 @@ + NOTRNB = LSAME( TRANB, 'N' ) + * + INFO = 0 +- IF( .NOT.NOTRNA .AND. .NOT.LSAME( TRANA, 'T' ) .AND. .NOT. +- $ LSAME( TRANA, 'C' ) ) THEN ++ IF( .NOT.NOTRNA .AND. .NOT.LSAME( TRANA, 'C' ) ) THEN + INFO = -1 +- ELSE IF( .NOT.NOTRNB .AND. .NOT.LSAME( TRANB, 'T' ) .AND. .NOT. +- $ LSAME( TRANB, 'C' ) ) THEN ++ ELSE IF( .NOT.NOTRNB .AND. .NOT.LSAME( TRANB, 'C' ) ) THEN + INFO = -2 + ELSE IF( ISGN.NE.1 .AND. ISGN.NE.-1 ) THEN + INFO = -3 +diff -uNr LAPACK.orig/SRC/dbdsqr.f LAPACK/SRC/dbdsqr.f +--- LAPACK.orig/SRC/dbdsqr.f Thu Nov 4 14:24:42 1999 ++++ LAPACK/SRC/dbdsqr.f Fri May 25 15:59:00 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO +@@ -18,14 +18,26 @@ + * Purpose + * ======= + * +-* DBDSQR computes the singular value decomposition (SVD) of a real +-* N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P' +-* denotes the transpose of P), where S is a diagonal matrix with +-* non-negative diagonal elements (the singular values of B), and Q +-* and P are orthogonal matrices. ++* DBDSQR computes the singular values and, optionally, the right and/or ++* left singular vectors from the singular value decomposition (SVD) of ++* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit ++* zero-shift QR algorithm. The SVD of B has the form ++* ++* B = Q * S * P**T ++* ++* where S is the diagonal matrix of singular values, Q is an orthogonal ++* matrix of left singular vectors, and P is an orthogonal matrix of ++* right singular vectors. If left singular vectors are requested, this ++* subroutine actually returns U*Q instead of Q, and, if right singular ++* vectors are requested, this subroutine returns P**T*VT instead of ++* P**T, for given real input matrices U and VT. 
When U and VT are the ++* orthogonal matrices that reduce a general matrix A to bidiagonal ++* form: A = U*B*VT, as computed by DGEBRD, then + * +-* The routine computes S, and optionally computes U * Q, P' * VT, +-* or Q' * C, for given real input matrices U, VT, and C. ++* A = (U*Q) * S * (P**T*VT) ++* ++* is the SVD of A. Optionally, the subroutine may also compute Q**T*C ++* for a given real input matrix C. + * + * See "Computing Small Singular Values of Bidiagonal Matrices With + * Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, +@@ -61,18 +73,17 @@ + * order. + * + * E (input/output) DOUBLE PRECISION array, dimension (N) +-* On entry, the elements of E contain the +-* offdiagonal elements of the bidiagonal matrix whose SVD +-* is desired. On normal exit (INFO = 0), E is destroyed. +-* If the algorithm does not converge (INFO > 0), D and E ++* On entry, the N-1 offdiagonal elements of the bidiagonal ++* matrix B. ++* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E + * will contain the diagonal and superdiagonal elements of a + * bidiagonal matrix orthogonally equivalent to the one given + * as input. E(N) is used for workspace. + * + * VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT) + * On entry, an N-by-NCVT matrix VT. +-* On exit, VT is overwritten by P' * VT. +-* VT is not referenced if NCVT = 0. ++* On exit, VT is overwritten by P**T * VT. ++* Not referenced if NCVT = 0. + * + * LDVT (input) INTEGER + * The leading dimension of the array VT. +@@ -81,21 +92,22 @@ + * U (input/output) DOUBLE PRECISION array, dimension (LDU, N) + * On entry, an NRU-by-N matrix U. + * On exit, U is overwritten by U * Q. +-* U is not referenced if NRU = 0. ++* Not referenced if NRU = 0. + * + * LDU (input) INTEGER + * The leading dimension of the array U. LDU >= max(1,NRU). + * + * C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC) + * On entry, an N-by-NCC matrix C. +-* On exit, C is overwritten by Q' * C. 
+-* C is not referenced if NCC = 0. ++* On exit, C is overwritten by Q**T * C. ++* Not referenced if NCC = 0. + * + * LDC (input) INTEGER + * The leading dimension of the array C. + * LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. + * +-* WORK (workspace) DOUBLE PRECISION array, dimension (4*N) ++* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) ++* if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise + * + * INFO (output) INTEGER + * = 0: successful exit +diff -uNr LAPACK.orig/SRC/dgebd2.f LAPACK/SRC/dgebd2.f +--- LAPACK.orig/SRC/dgebd2.f Thu Nov 4 14:24:42 1999 ++++ LAPACK/SRC/dgebd2.f Fri May 25 15:59:22 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* February 29, 1992 ++* May 7, 2001 + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, M, N +@@ -169,8 +169,9 @@ + * + * Apply H(i) to A(i:m,i+1:n) from the left + * +- CALL DLARF( 'Left', M-I+1, N-I, A( I, I ), 1, TAUQ( I ), +- $ A( I, I+1 ), LDA, WORK ) ++ IF( I.LT.N ) ++ $ CALL DLARF( 'Left', M-I+1, N-I, A( I, I ), 1, TAUQ( I ), ++ $ A( I, I+1 ), LDA, WORK ) + A( I, I ) = D( I ) + * + IF( I.LT.N ) THEN +@@ -207,8 +208,9 @@ + * + * Apply G(i) to A(i+1:m,i:n) from the right + * +- CALL DLARF( 'Right', M-I, N-I+1, A( I, I ), LDA, TAUP( I ), +- $ A( MIN( I+1, M ), I ), LDA, WORK ) ++ IF( I.LT.M ) ++ $ CALL DLARF( 'Right', M-I, N-I+1, A( I, I ), LDA, ++ $ TAUP( I ), A( MIN( I+1, M ), I ), LDA, WORK ) + A( I, I ) = D( I ) + * + IF( I.LT.M ) THEN +diff -uNr LAPACK.orig/SRC/dgees.f LAPACK/SRC/dgees.f +--- LAPACK.orig/SRC/dgees.f Thu Nov 4 14:24:43 1999 ++++ LAPACK/SRC/dgees.f Fri May 25 15:59:50 2001 +@@ -5,6 +5,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. 
+ CHARACTER JOBVS, SORT +@@ -110,10 +111,9 @@ + * The dimension of the array WORK. LWORK >= max(1,3*N). + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * BWORK (workspace) LOGICAL array, dimension (N) + * Not referenced if SORT = 'N'. +@@ -138,12 +138,13 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + * .. + * .. Local Scalars .. +- LOGICAL CURSL, LASTSL, LQUERY, LST2SL, SCALEA, WANTST, +- $ WANTVS ++ LOGICAL CURSL, LASTSL, LST2SL, SCALEA, WANTST, WANTVS + INTEGER HSWORK, I, I1, I2, IBAL, ICOND, IERR, IEVAL, + $ IHI, ILO, INXT, IP, ITAU, IWRK, K, MAXB, + $ MAXWRK, MINWRK +@@ -154,8 +155,8 @@ + DOUBLE PRECISION DUM( 1 ) + * .. + * .. External Subroutines .. +- EXTERNAL DCOPY, DGEBAK, DGEBAL, DGEHRD, DHSEQR, DLACPY, +- $ DLASCL, DORGHR, DSWAP, DTRSEN, XERBLA ++ EXTERNAL DCOPY, DGEBAK, DGEBAL, DGEHRD, DHSEQR, DLABAD, ++ $ DLACPY, DLASCL, DORGHR, DSWAP, DTRSEN, XERBLA + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -171,7 +172,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVS = LSAME( JOBVS, 'V' ) + WANTST = LSAME( SORT, 'S' ) + IF( ( .NOT.WANTVS ) .AND. ( .NOT.LSAME( JOBVS, 'N' ) ) ) THEN +@@ -197,7 +197,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. 
LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 2*N + N*ILAENV( 1, 'DGEHRD', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 3*N ) + IF( .NOT.WANTVS ) THEN +@@ -216,19 +216,18 @@ + MAXWRK = MAX( MAXWRK, N+HSWORK, 1 ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -13 + END IF +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -13 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGEES ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/dgeesx.f LAPACK/SRC/dgeesx.f +--- LAPACK.orig/SRC/dgeesx.f Thu Nov 4 14:24:43 1999 ++++ LAPACK/SRC/dgeesx.f Fri May 25 16:00:13 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Do WS calculations if LWORK = -1 (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVS, SENSE, SORT +@@ -140,6 +141,10 @@ + * N+2*SDIM*(N-SDIM) <= N+N*N/2. + * For good performance, LWORK must generally be larger. + * ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. ++* + * IWORK (workspace/output) INTEGER array, dimension (LIWORK) + * Not referenced if SENSE = 'N' or 'E'. + * On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. +@@ -171,6 +176,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + * .. +@@ -186,8 +193,8 @@ + DOUBLE PRECISION DUM( 1 ) + * .. + * .. External Subroutines .. 
+- EXTERNAL DCOPY, DGEBAK, DGEBAL, DGEHRD, DHSEQR, DLACPY, +- $ DLASCL, DORGHR, DSWAP, DTRSEN, XERBLA ++ EXTERNAL DCOPY, DGEBAK, DGEBAL, DGEHRD, DHSEQR, DLABAD, ++ $ DLACPY, DLASCL, DORGHR, DSWAP, DTRSEN, XERBLA + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -239,7 +246,7 @@ + * in the code.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. LWORK.GE.1 ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 2*N + N*ILAENV( 1, 'DGEHRD', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 3*N ) + IF( .NOT.WANTVS ) THEN +@@ -257,21 +264,25 @@ + HSWORK = MAX( K*( K+2 ), 2*N ) + MAXWRK = MAX( MAXWRK, N+HSWORK, 1 ) + END IF ++* ++* Estimate the workspace needed by DTRSEN. ++* ++ IF( WANTST ) THEN ++ MAXWRK = MAX( MAXWRK, N+( N*N+1 ) / 2 ) ++ END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -16 + END IF +- IF( LWORK.LT.MINWRK ) THEN +- INFO = -16 +- END IF +- IF( LIWORK.LT.1 ) THEN +- INFO = -18 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGEESX', -INFO ) + RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/dgeev.f LAPACK/SRC/dgeev.f +--- LAPACK.orig/SRC/dgeev.f Wed Dec 8 16:00:35 1999 ++++ LAPACK/SRC/dgeev.f Fri May 25 16:00:43 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* December 8, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -98,10 +99,9 @@ + * if JOBVL = 'V' or JOBVR = 'V', LWORK >= 4*N. For good + * performance, LWORK must generally be larger. 
+ * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * INFO (output) INTEGER + * = 0: successful exit +@@ -114,11 +114,13 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, SCALEA, WANTVL, WANTVR ++ LOGICAL SCALEA, WANTVL, WANTVR + CHARACTER SIDE + INTEGER HSWORK, I, IBAL, IERR, IHI, ILO, ITAU, IWRK, K, + $ MAXB, MAXWRK, MINWRK, NOUT +@@ -130,8 +132,9 @@ + DOUBLE PRECISION DUM( 1 ) + * .. + * .. External Subroutines .. +- EXTERNAL DGEBAK, DGEBAL, DGEHRD, DHSEQR, DLACPY, DLARTG, +- $ DLASCL, DORGHR, DROT, DSCAL, DTREVC, XERBLA ++ EXTERNAL DGEBAK, DGEBAL, DGEHRD, DHSEQR, DLABAD, DLACPY, ++ $ DLARTG, DLASCL, DORGHR, DROT, DSCAL, DTREVC, ++ $ XERBLA + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -148,7 +151,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVL = LSAME( JOBVL, 'V' ) + WANTVR = LSAME( JOBVR, 'V' ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.LSAME( JOBVL, 'N' ) ) ) THEN +@@ -176,7 +178,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 2*N + N*ILAENV( 1, 'DGEHRD', ' ', N, 1, N, 0 ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.WANTVR ) ) THEN + MINWRK = MAX( 1, 3*N ) +@@ -197,19 +199,18 @@ + MAXWRK = MAX( MAXWRK, 4*N ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -13 + END IF +- IF( LWORK.LT.MINWRK .AND. 
.NOT.LQUERY ) THEN +- INFO = -13 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGEEV ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/dgeevx.f LAPACK/SRC/dgeevx.f +--- LAPACK.orig/SRC/dgeevx.f Thu Nov 4 14:24:43 1999 ++++ LAPACK/SRC/dgeevx.f Fri May 25 16:01:05 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER BALANC, JOBVL, JOBVR, SENSE +@@ -179,10 +180,9 @@ + * LWORK >= 3*N. If SENSE = 'V' or 'B', LWORK >= N*(N+6). + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * IWORK (workspace) INTEGER array, dimension (2*N-2) + * If SENSE = 'N' or 'E', not referenced. +@@ -198,12 +198,14 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, SCALEA, WANTVL, WANTVR, WNTSNB, WNTSNE, +- $ WNTSNN, WNTSNV ++ LOGICAL SCALEA, WANTVL, WANTVR, WNTSNB, WNTSNE, WNTSNN, ++ $ WNTSNV + CHARACTER JOB, SIDE + INTEGER HSWORK, I, ICOND, IERR, ITAU, IWRK, K, MAXB, + $ MAXWRK, MINWRK, NOUT +@@ -215,9 +217,9 @@ + DOUBLE PRECISION DUM( 1 ) + * .. + * .. 
External Subroutines .. +- EXTERNAL DGEBAK, DGEBAL, DGEHRD, DHSEQR, DLACPY, DLARTG, +- $ DLASCL, DORGHR, DROT, DSCAL, DTREVC, DTRSNA, +- $ XERBLA ++ EXTERNAL DGEBAK, DGEBAL, DGEHRD, DHSEQR, DLABAD, DLACPY, ++ $ DLARTG, DLASCL, DORGHR, DROT, DSCAL, DTREVC, ++ $ DTRSNA, XERBLA + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -234,7 +236,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVL = LSAME( JOBVL, 'V' ) + WANTVR = LSAME( JOBVR, 'V' ) + WNTSNN = LSAME( SENSE, 'N' ) +@@ -274,7 +275,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'DGEHRD', ' ', N, 1, N, 0 ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.WANTVR ) ) THEN + MINWRK = MAX( 1, 2*N ) +@@ -308,19 +309,18 @@ + MAXWRK = MAX( MAXWRK, 3*N, 1 ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -21 + END IF +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -21 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGEEVX', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/dgegs.f LAPACK/SRC/dgegs.f +--- LAPACK.orig/SRC/dgegs.f Thu Nov 4 14:24:43 1999 ++++ LAPACK/SRC/dgegs.f Fri May 25 16:01:53 2001 +@@ -5,7 +5,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR +@@ -22,105 +22,75 @@ + * + * This routine is deprecated and has been replaced by routine DGGES. 
+ * +-* DGEGS computes for a pair of N-by-N real nonsymmetric matrices A, B: +-* the generalized eigenvalues (alphar +/- alphai*i, beta), the real +-* Schur form (A, B), and optionally left and/or right Schur vectors +-* (VSL and VSR). +-* +-* (If only the generalized eigenvalues are needed, use the driver DGEGV +-* instead.) +-* +-* A generalized eigenvalue for a pair of matrices (A,B) is, roughly +-* speaking, a scalar w or a ratio alpha/beta = w, such that A - w*B +-* is singular. It is usually represented as the pair (alpha,beta), +-* as there is a reasonable interpretation for beta=0, and even for +-* both being zero. A good beginning reference is the book, "Matrix +-* Computations", by G. Golub & C. van Loan (Johns Hopkins U. Press) +-* +-* The (generalized) Schur form of a pair of matrices is the result of +-* multiplying both matrices on the left by one orthogonal matrix and +-* both on the right by another orthogonal matrix, these two orthogonal +-* matrices being chosen so as to bring the pair of matrices into +-* (real) Schur form. +-* +-* A pair of matrices A, B is in generalized real Schur form if B is +-* upper triangular with non-negative diagonal and A is block upper +-* triangular with 1-by-1 and 2-by-2 blocks. 1-by-1 blocks correspond +-* to real generalized eigenvalues, while 2-by-2 blocks of A will be +-* "standardized" by making the corresponding elements of B have the +-* form: +-* [ a 0 ] +-* [ 0 b ] +-* +-* and the pair of corresponding 2-by-2 blocks in A and B will +-* have a complex conjugate pair of generalized eigenvalues. +-* +-* The left and right Schur vectors are the columns of VSL and VSR, +-* respectively, where VSL and VSR are the orthogonal matrices +-* which reduce A and B to Schur form: +-* +-* Schur form of (A,B) = ( (VSL)**T A (VSR), (VSL)**T B (VSR) ) ++* DGEGS computes the eigenvalues, real Schur form, and, optionally, ++* left and or/right Schur vectors of a real matrix pair (A,B). 
++* Given two square matrices A and B, the generalized real Schur ++* factorization has the form ++* ++* A = Q*S*Z**T, B = Q*T*Z**T ++* ++* where Q and Z are orthogonal matrices, T is upper triangular, and S ++* is an upper quasi-triangular matrix with 1-by-1 and 2-by-2 diagonal ++* blocks, the 2-by-2 blocks corresponding to complex conjugate pairs ++* of eigenvalues of (A,B). The columns of Q are the left Schur vectors ++* and the columns of Z are the right Schur vectors. ++* ++* If only the eigenvalues of (A,B) are needed, the driver routine ++* DGEGV should be used instead. See DGEGV for a description of the ++* eigenvalues of the generalized nonsymmetric eigenvalue problem ++* (GNEP). + * + * Arguments + * ========= + * + * JOBVSL (input) CHARACTER*1 + * = 'N': do not compute the left Schur vectors; +-* = 'V': compute the left Schur vectors. ++* = 'V': compute the left Schur vectors (returned in VSL). + * + * JOBVSR (input) CHARACTER*1 + * = 'N': do not compute the right Schur vectors; +-* = 'V': compute the right Schur vectors. ++* = 'V': compute the right Schur vectors (returned in VSR). + * + * N (input) INTEGER + * The order of the matrices A, B, VSL, and VSR. N >= 0. + * + * A (input/output) DOUBLE PRECISION array, dimension (LDA, N) +-* On entry, the first of the pair of matrices whose generalized +-* eigenvalues and (optionally) Schur vectors are to be +-* computed. +-* On exit, the generalized Schur form of A. +-* Note: to avoid overflow, the Frobenius norm of the matrix +-* A should be less than the overflow threshold. ++* On entry, the matrix A. ++* On exit, the upper quasi-triangular matrix S from the ++* generalized real Schur factorization. + * + * LDA (input) INTEGER + * The leading dimension of A. LDA >= max(1,N). + * + * B (input/output) DOUBLE PRECISION array, dimension (LDB, N) +-* On entry, the second of the pair of matrices whose +-* generalized eigenvalues and (optionally) Schur vectors are +-* to be computed. 
+-* On exit, the generalized Schur form of B. +-* Note: to avoid overflow, the Frobenius norm of the matrix +-* B should be less than the overflow threshold. ++* On entry, the matrix B. ++* On exit, the upper triangular matrix T from the generalized ++* real Schur factorization. + * + * LDB (input) INTEGER + * The leading dimension of B. LDB >= max(1,N). + * + * ALPHAR (output) DOUBLE PRECISION array, dimension (N) ++* The real parts of each scalar alpha defining an eigenvalue ++* of GNEP. ++* + * ALPHAI (output) DOUBLE PRECISION array, dimension (N) +-* BETA (output) DOUBLE PRECISION array, dimension (N) +-* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will +-* be the generalized eigenvalues. ALPHAR(j) + ALPHAI(j)*i, +-* j=1,...,N and BETA(j),j=1,...,N are the diagonals of the +-* complex Schur form (A,B) that would result if the 2-by-2 +-* diagonal blocks of the real Schur form of (A,B) were further +-* reduced to triangular form using 2-by-2 complex unitary +-* transformations. If ALPHAI(j) is zero, then the j-th ++* The imaginary parts of each scalar alpha defining an ++* eigenvalue of GNEP. If ALPHAI(j) is zero, then the j-th + * eigenvalue is real; if positive, then the j-th and (j+1)-st +-* eigenvalues are a complex conjugate pair, with ALPHAI(j+1) +-* negative. ++* eigenvalues are a complex conjugate pair, with ++* ALPHAI(j+1) = -ALPHAI(j). + * +-* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) +-* may easily over- or underflow, and BETA(j) may even be zero. +-* Thus, the user should avoid naively computing the ratio +-* alpha/beta. However, ALPHAR and ALPHAI will be always less +-* than and usually comparable with norm(A) in magnitude, and +-* BETA always less than and usually comparable with norm(B). ++* BETA (output) DOUBLE PRECISION array, dimension (N) ++* The scalars beta that define the eigenvalues of GNEP. 
++* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and ++* beta = BETA(j) represent the j-th eigenvalue of the matrix ++* pair (A,B), in one of the forms lambda = alpha/beta or ++* mu = beta/alpha. Since either lambda or mu may overflow, ++* they should not, in general, be computed. + * + * VSL (output) DOUBLE PRECISION array, dimension (LDVSL,N) +-* If JOBVSL = 'V', VSL will contain the left Schur vectors. +-* (See "Purpose", above.) ++* If JOBVSL = 'V', the matrix of left Schur vectors Q. + * Not referenced if JOBVSL = 'N'. + * + * LDVSL (input) INTEGER +@@ -128,8 +98,7 @@ + * if JOBVSL = 'V', LDVSL >= N. + * + * VSR (output) DOUBLE PRECISION array, dimension (LDVSR,N) +-* If JOBVSR = 'V', VSR will contain the right Schur vectors. +-* (See "Purpose", above.) ++* If JOBVSR = 'V', the matrix of right Schur vectors Z. + * Not referenced if JOBVSR = 'N'. + * + * LDVSR (input) INTEGER +diff -uNr LAPACK.orig/SRC/dgegv.f LAPACK/SRC/dgegv.f +--- LAPACK.orig/SRC/dgegv.f Thu Nov 4 14:25:43 1999 ++++ LAPACK/SRC/dgegv.f Fri May 25 16:02:16 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -21,23 +21,32 @@ + * + * This routine is deprecated and has been replaced by routine DGGEV. + * +-* DGEGV computes for a pair of n-by-n real nonsymmetric matrices A and +-* B, the generalized eigenvalues (alphar +/- alphai*i, beta), and +-* optionally, the left and/or right generalized eigenvectors (VL and +-* VR). +-* +-* A generalized eigenvalue for a pair of matrices (A,B) is, roughly +-* speaking, a scalar w or a ratio alpha/beta = w, such that A - w*B +-* is singular. It is usually represented as the pair (alpha,beta), +-* as there is a reasonable interpretation for beta=0, and even for +-* both being zero. 
A good beginning reference is the book, "Matrix +-* Computations", by G. Golub & C. van Loan (Johns Hopkins U. Press) +-* +-* A right generalized eigenvector corresponding to a generalized +-* eigenvalue w for a pair of matrices (A,B) is a vector r such +-* that (A - w B) r = 0 . A left generalized eigenvector is a vector +-* l such that l**H * (A - w B) = 0, where l**H is the +-* conjugate-transpose of l. ++* DGEGV computes the eigenvalues and, optionally, the left and/or right ++* eigenvectors of a real matrix pair (A,B). ++* Given two square matrices A and B, ++* the generalized nonsymmetric eigenvalue problem (GNEP) is to find the ++* eigenvalues lambda and corresponding (non-zero) eigenvectors x such ++* that ++* ++* A*x = lambda*B*x. ++* ++* An alternate form is to find the eigenvalues mu and corresponding ++* eigenvectors y such that ++* ++* mu*A*y = B*y. ++* ++* These two forms are equivalent with mu = 1/lambda and x = y if ++* neither lambda nor mu is zero. In order to deal with the case that ++* lambda or mu is zero or small, two values alpha and beta are returned ++* for each eigenvalue, such that lambda = alpha/beta and ++* mu = beta/alpha. ++* ++* The vectors x and y in the above equations are right eigenvectors of ++* the matrix pair (A,B). Vectors u and v satisfying ++* ++* u**H*A = lambda*u**H*B or mu*v**H*A = v**H*B ++* ++* are left eigenvectors of (A,B). + * + * Note: this routine performs "full balancing" on A and B -- see + * "Further Details", below. +@@ -47,63 +56,75 @@ + * + * JOBVL (input) CHARACTER*1 + * = 'N': do not compute the left generalized eigenvectors; +-* = 'V': compute the left generalized eigenvectors. ++* = 'V': compute the left generalized eigenvectors (returned ++* in VL). + * + * JOBVR (input) CHARACTER*1 + * = 'N': do not compute the right generalized eigenvectors; +-* = 'V': compute the right generalized eigenvectors. ++* = 'V': compute the right generalized eigenvectors (returned ++* in VR). 
+ * + * N (input) INTEGER + * The order of the matrices A, B, VL, and VR. N >= 0. + * + * A (input/output) DOUBLE PRECISION array, dimension (LDA, N) +-* On entry, the first of the pair of matrices whose +-* generalized eigenvalues and (optionally) generalized +-* eigenvectors are to be computed. +-* On exit, the contents will have been destroyed. (For a +-* description of the contents of A on exit, see "Further +-* Details", below.) ++* On entry, the matrix A. ++* If JOBVL = 'V' or JOBVR = 'V', then on exit A ++* contains the real Schur form of A from the generalized Schur ++* factorization of the pair (A,B) after balancing. ++* If no eigenvectors were computed, then only the diagonal ++* blocks from the Schur form will be correct. See DGGHRD and ++* DHGEQZ for details. + * + * LDA (input) INTEGER + * The leading dimension of A. LDA >= max(1,N). + * + * B (input/output) DOUBLE PRECISION array, dimension (LDB, N) +-* On entry, the second of the pair of matrices whose +-* generalized eigenvalues and (optionally) generalized +-* eigenvectors are to be computed. +-* On exit, the contents will have been destroyed. (For a +-* description of the contents of B on exit, see "Further +-* Details", below.) ++* On entry, the matrix B. ++* If JOBVL = 'V' or JOBVR = 'V', then on exit B contains the ++* upper triangular matrix obtained from B in the generalized ++* Schur factorization of the pair (A,B) after balancing. ++* If no eigenvectors were computed, then only those elements of ++* B corresponding to the diagonal blocks from the Schur form of ++* A will be correct. See DGGHRD and DHGEQZ for details. + * + * LDB (input) INTEGER + * The leading dimension of B. LDB >= max(1,N). + * + * ALPHAR (output) DOUBLE PRECISION array, dimension (N) ++* The real parts of each scalar alpha defining an eigenvalue of ++* GNEP. 
++* + * ALPHAI (output) DOUBLE PRECISION array, dimension (N) +-* BETA (output) DOUBLE PRECISION array, dimension (N) +-* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will +-* be the generalized eigenvalues. If ALPHAI(j) is zero, then +-* the j-th eigenvalue is real; if positive, then the j-th and ++* The imaginary parts of each scalar alpha defining an ++* eigenvalue of GNEP. If ALPHAI(j) is zero, then the j-th ++* eigenvalue is real; if positive, then the j-th and + * (j+1)-st eigenvalues are a complex conjugate pair, with +-* ALPHAI(j+1) negative. ++* ALPHAI(j+1) = -ALPHAI(j). + * +-* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) +-* may easily over- or underflow, and BETA(j) may even be zero. +-* Thus, the user should avoid naively computing the ratio +-* alpha/beta. However, ALPHAR and ALPHAI will be always less +-* than and usually comparable with norm(A) in magnitude, and +-* BETA always less than and usually comparable with norm(B). ++* BETA (output) DOUBLE PRECISION array, dimension (N) ++* The scalars beta that define the eigenvalues of GNEP. ++* ++* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and ++* beta = BETA(j) represent the j-th eigenvalue of the matrix ++* pair (A,B), in one of the forms lambda = alpha/beta or ++* mu = beta/alpha. Since either lambda or mu may overflow, ++* they should not, in general, be computed. + * + * VL (output) DOUBLE PRECISION array, dimension (LDVL,N) +-* If JOBVL = 'V', the left generalized eigenvectors. (See +-* "Purpose", above.) Real eigenvectors take one column, +-* complex take two columns, the first for the real part and +-* the second for the imaginary part. Complex eigenvectors +-* correspond to an eigenvalue with positive imaginary part. +-* Each eigenvector will be scaled so the largest component +-* will have abs(real part) + abs(imag. part) = 1, *except* +-* that for eigenvalues with alpha=beta=0, a zero vector will +-* be returned as the corresponding eigenvector. 
++* If JOBVL = 'V', the left eigenvectors u(j) are stored ++* in the columns of VL, in the same order as their eigenvalues. ++* If the j-th eigenvalue is real, then u(j) = VL(:,j). ++* If the j-th and (j+1)-st eigenvalues form a complex conjugate ++* pair, then ++* u(j) = VL(:,j) + i*VL(:,j+1) ++* and ++* u(j+1) = VL(:,j) - i*VL(:,j+1). ++* ++* Each eigenvector is scaled so that its largest component has ++* abs(real part) + abs(imag. part) = 1, except for eigenvectors ++* corresponding to an eigenvalue with alpha = beta = 0, which ++* are set to zero. + * Not referenced if JOBVL = 'N'. + * + * LDVL (input) INTEGER +@@ -111,15 +132,19 @@ + * if JOBVL = 'V', LDVL >= N. + * + * VR (output) DOUBLE PRECISION array, dimension (LDVR,N) +-* If JOBVR = 'V', the right generalized eigenvectors. (See +-* "Purpose", above.) Real eigenvectors take one column, +-* complex take two columns, the first for the real part and +-* the second for the imaginary part. Complex eigenvectors +-* correspond to an eigenvalue with positive imaginary part. +-* Each eigenvector will be scaled so the largest component +-* will have abs(real part) + abs(imag. part) = 1, *except* +-* that for eigenvalues with alpha=beta=0, a zero vector will +-* be returned as the corresponding eigenvector. ++* If JOBVR = 'V', the right eigenvectors x(j) are stored ++* in the columns of VR, in the same order as their eigenvalues. ++* If the j-th eigenvalue is real, then x(j) = VR(:,j). ++* If the j-th and (j+1)-st eigenvalues form a complex conjugate ++* pair, then ++* x(j) = VR(:,j) + i*VR(:,j+1) ++* and ++* x(j+1) = VR(:,j) - i*VR(:,j+1). ++* ++* Each eigenvector is scaled so that its largest component has ++* abs(real part) + abs(imag. part) = 1, except for eigenvalues ++* corresponding to an eigenvalue with alpha = beta = 0, which ++* are set to zero. + * Not referenced if JOBVR = 'N'. 
+ * + * LDVR (input) INTEGER +diff -uNr LAPACK.orig/SRC/dgelsd.f LAPACK/SRC/dgelsd.f +--- LAPACK.orig/SRC/dgelsd.f Thu Nov 4 14:26:25 1999 ++++ LAPACK/SRC/dgelsd.f Fri May 25 16:03:10 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -61,9 +62,10 @@ + * The number of right hand sides, i.e., the number of columns + * of the matrices B and X. NRHS >= 0. + * +-* A (input) DOUBLE PRECISION array, dimension (LDA,N) ++* A (input/output) DOUBLE PRECISION array, dimension (LDA,N) + * On entry, the M-by-N matrix A. +-* On exit, A has been destroyed. ++* On exit, the first min(m,n) rows of A are overwritten with ++* its right singular vectors, stored rowwise. + * + * LDA (input) INTEGER + * The leading dimension of the array A. LDA >= max(1,M). +@@ -95,23 +97,20 @@ + * On exit, if INFO = 0, WORK(1) returns the optimal LWORK. + * + * LWORK (input) INTEGER +-* The dimension of the array WORK. LWORK must be at least 1. ++* The dimension of the array WORK. LWORK >= 1. + * The exact minimum amount of workspace needed depends on M, +-* N and NRHS. As long as LWORK is at least +-* 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2, +-* if M is greater than or equal to N or +-* 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2, +-* if M is less than N, the code will execute correctly. ++* N and NRHS. ++* If M >= N, LWORK >= 11*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS. ++* If M < N, LWORK >= 11*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS. 
+ * SMLSIZ is returned by ILAENV and is equal to the maximum + * size of the subproblems at the bottom of the computation + * tree (usually about 25), and +-* NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) ++* NLVL = INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * IWORK (workspace) INTEGER array, dimension (LIWORK) + * LIWORK >= 3 * MINMN * NLVL + 11 * MINMN, +@@ -135,14 +134,15 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE, TWO + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0, TWO = 2.0D0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY + INTEGER IASCL, IBSCL, IE, IL, ITAU, ITAUP, ITAUQ, + $ LDWORK, MAXMN, MAXWRK, MINMN, MINWRK, MM, +- $ MNTHR, NLVL, NWORK, SMLSIZ, WLALSD ++ $ MNTHR, NLVL, NWORK, SMLSIZ + DOUBLE PRECISION ANRM, BIGNUM, BNRM, EPS, SFMIN, SMLNUM + * .. + * .. External Subroutines .. 
+@@ -165,7 +165,6 @@ + MINMN = MIN( M, N ) + MAXMN = MAX( M, N ) + MNTHR = ILAENV( 6, 'DGELSD', ' ', M, N, NRHS, -1 ) +- LQUERY = ( LWORK.EQ.-1 ) + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN +@@ -189,8 +188,8 @@ + * + MINWRK = 1 + MINMN = MAX( 1, MINMN ) +- NLVL = MAX( INT( LOG( DBLE( MINMN ) / DBLE( SMLSIZ+1 ) ) / +- $ LOG( TWO ) ) + 1, 0 ) ++ NLVL = INT( LOG( DBLE( MINMN ) / DBLE( SMLSIZ+1 ) ) / LOG( TWO ) ) ++ $ + 1 + * + IF( INFO.EQ.0 ) THEN + MAXWRK = 0 +@@ -215,12 +214,11 @@ + $ ILAENV( 1, 'DORMBR', 'QLT', MM, NRHS, N, -1 ) ) + MAXWRK = MAX( MAXWRK, 3*N+( N-1 )* + $ ILAENV( 1, 'DORMBR', 'PLN', N, NRHS, N, -1 ) ) +- WLALSD = 9*N+2*N*SMLSIZ+8*N*NLVL+N*NRHS+(SMLSIZ+1)**2 +- MAXWRK = MAX( MAXWRK, 3*N+WLALSD ) +- MINWRK = MAX( 3*N+MM, 3*N+NRHS, 3*N+WLALSD ) ++ MAXWRK = MAX( MAXWRK, 3*N+8*N+2*N*SMLSIZ+8*N*NLVL+N*NRHS ) ++ MINWRK = MAX( 3*N+MM, 3*N+NRHS, ++ $ 3*N+8*N+2*N*SMLSIZ+8*N*NLVL+N*NRHS ) + END IF + IF( N.GT.M ) THEN +- WLALSD = 9*M+2*M*SMLSIZ+8*M*NLVL+M*NRHS+(SMLSIZ+1)**2 + IF( N.GE.MNTHR ) THEN + * + * Path 2a - underdetermined, with many more columns +@@ -240,7 +238,8 @@ + END IF + MAXWRK = MAX( MAXWRK, M+NRHS* + $ ILAENV( 1, 'DORMLQ', 'LT', N, NRHS, M, -1 ) ) +- MAXWRK = MAX( MAXWRK, M*M+4*M+WLALSD ) ++ MAXWRK = MAX( MAXWRK, M*M+4*M+8*M+2*M*SMLSIZ+8*M*NLVL+M* ++ $ NRHS ) + ELSE + * + * Path 2 - remaining underdetermined cases. +@@ -251,26 +250,26 @@ + $ ILAENV( 1, 'DORMBR', 'QLT', M, NRHS, N, -1 ) ) + MAXWRK = MAX( MAXWRK, 3*M+M* + $ ILAENV( 1, 'DORMBR', 'PLN', N, NRHS, M, -1 ) ) +- MAXWRK = MAX( MAXWRK, 3*M+WLALSD ) ++ MAXWRK = MAX( MAXWRK, 3*M+8*M+2*M*SMLSIZ+8*M*NLVL+M* ++ $ NRHS ) + END IF +- MINWRK = MAX( 3*M+NRHS, 3*M+M, 3*M+WLALSD ) ++ MINWRK = MAX( 3*M+NRHS, 3*M+M, ++ $ 3*M+8*M+2*M*SMLSIZ+8*M*NLVL+M*NRHS ) + END IF + MINWRK = MIN( MINWRK, MAXWRK ) + WORK( 1 ) = MAXWRK +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -12 +- END IF ++ IF( LWORK.LT.MINWRK .AND. 
LWORK.NE.LQUERV ) ++ $ INFO = -12 + END IF + * ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGELSD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- GO TO 10 + END IF +-* +-* Quick return if possible. +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( M.EQ.0 .OR. N.EQ.0 ) THEN + RANK = 0 + RETURN +diff -uNr LAPACK.orig/SRC/dgelss.f LAPACK/SRC/dgelss.f +--- LAPACK.orig/SRC/dgelss.f Thu Nov 4 14:24:44 1999 ++++ LAPACK/SRC/dgelss.f Fri May 25 16:03:46 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -86,10 +86,9 @@ + * LWORK >= 3*min(M,N) + max( 2*min(M,N), max(M,N), NRHS ) + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * INFO (output) INTEGER + * = 0: successful exit +@@ -156,7 +155,7 @@ + * following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 0 + MM = M + IF( M.GE.N .AND. M.GE.MNTHR ) THEN +@@ -229,20 +228,18 @@ + END IF + MAXWRK = MAX( MINWRK, MAXWRK ) + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) ++ $ INFO = -12 + END IF + * +- MINWRK = MAX( MINWRK, 1 ) +- IF( LWORK.LT.MINWRK .AND. 
.NOT.LQUERY ) +- $ INFO = -12 ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGELSS', -INFO ) + RETURN + ELSE IF( LQUERY ) THEN + RETURN + END IF +-* +-* Quick return if possible +-* + IF( M.EQ.0 .OR. N.EQ.0 ) THEN + RANK = 0 + RETURN +@@ -491,8 +488,8 @@ + DO 40 I = 1, NRHS, CHUNK + BL = MIN( NRHS-I+1, CHUNK ) + CALL DGEMM( 'T', 'N', M, BL, M, ONE, WORK( IL ), LDWORK, +- $ B( 1, I ), LDB, ZERO, WORK( IWORK ), N ) +- CALL DLACPY( 'G', M, BL, WORK( IWORK ), N, B( 1, I ), ++ $ B( 1, I ), LDB, ZERO, WORK( IWORK ), M ) ++ CALL DLACPY( 'G', M, BL, WORK( IWORK ), M, B( 1, I ), + $ LDB ) + 40 CONTINUE + ELSE +diff -uNr LAPACK.orig/SRC/dgesdd.f LAPACK/SRC/dgesdd.f +--- LAPACK.orig/SRC/dgesdd.f Thu Nov 11 20:32:31 1999 ++++ LAPACK/SRC/dgesdd.f Fri May 25 16:07:58 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBZ +@@ -116,16 +117,20 @@ + * LWORK (input) INTEGER + * The dimension of the array WORK. LWORK >= 1. + * If JOBZ = 'N', +-* LWORK >= 3*min(M,N) + max(max(M,N),6*min(M,N)). ++* LWORK >= max(14*min(M,N)+4, 10*min(M,N)+2+ ++* SMLSIZ*(SMLSIZ+8)) + max(M,N) ++* where SMLSIZ is returned by ILAENV and is equal to the ++* maximum size of the subproblems at the bottom of the ++* computation tree (usually about 25). + * If JOBZ = 'O', +-* LWORK >= 3*min(M,N)*min(M,N) + +-* max(max(M,N),5*min(M,N)*min(M,N)+4*min(M,N)). ++* LWORK >= 5*min(M,N)*min(M,N) + max(M,N) + 9*min(M,N). + * If JOBZ = 'S' or 'A' +-* LWORK >= 3*min(M,N)*min(M,N) + +-* max(max(M,N),4*min(M,N)*min(M,N)+4*min(M,N)). ++* LWORK >= 4*min(M,N)*min(M,N) + max(M,N) + 9*min(M,N). + * For good performance, LWORK should generally be larger. 
+-* If LWORK < 0 but other input arguments are legal, WORK(1) +-* returns the optimal LWORK. ++* ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * IWORK (workspace) INTEGER array, dimension (8*min(M,N)) + * +@@ -144,15 +149,17 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE +- PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) ++ PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, WNTQA, WNTQAS, WNTQN, WNTQO, WNTQS +- INTEGER BDSPAC, BLK, CHUNK, I, IE, IERR, IL, ++ LOGICAL WNTQA, WNTQAS, WNTQN, WNTQO, WNTQS ++ INTEGER BDSPAC, BDSPAN, BLK, CHUNK, I, IE, IERR, IL, + $ IR, ISCL, ITAU, ITAUP, ITAUQ, IU, IVT, LDWKVT, + $ LDWRKL, LDWRKR, LDWRKU, MAXWRK, MINMN, MINWRK, +- $ MNTHR, NWORK, WRKBL ++ $ MNTHR, NWORK, SMLSIZ, WRKBL + DOUBLE PRECISION ANRM, BIGNUM, EPS, SMLNUM + * .. + * .. Local Arrays .. +@@ -168,7 +175,7 @@ + LOGICAL LSAME + INTEGER ILAENV + DOUBLE PRECISION DLAMCH, DLANGE +- EXTERNAL DLAMCH, DLANGE, ILAENV, LSAME ++ EXTERNAL LSAME, ILAENV, DLAMCH, DLANGE + * .. + * .. Intrinsic Functions .. + INTRINSIC DBLE, INT, MAX, MIN, SQRT +@@ -187,7 +194,6 @@ + WNTQN = LSAME( JOBZ, 'N' ) + MINWRK = 1 + MAXWRK = 1 +- LQUERY = ( LWORK.EQ.-1 ) + * + IF( .NOT.( WNTQA .OR. WNTQS .OR. WNTQO .OR. 
WNTQN ) ) THEN + INFO = -1 +@@ -206,6 +212,8 @@ + INFO = -10 + END IF + * ++ SMLSIZ = ILAENV( 9, 'DGESDD', ' ', 0, 0, 0, 0 ) ++* + * Compute workspace + * (Note: Comments in the code beginning "Workspace:" describe the + * minimal amount of workspace needed at that point in the code, +@@ -218,22 +226,19 @@ + * + * Compute space needed for DBDSDC + * +- IF( WNTQN ) THEN +- BDSPAC = 7*N +- ELSE +- BDSPAC = 3*N*N + 4*N +- END IF ++ BDSPAC = 3*N*N + 7*N ++ BDSPAN = MAX( 12*N+4, 8*N+2+SMLSIZ*( SMLSIZ+8 ) ) + IF( M.GE.MNTHR ) THEN + IF( WNTQN ) THEN + * + * Path 1 (M much larger than N, JOBZ='N') + * +- WRKBL = N + N*ILAENV( 1, 'DGEQRF', ' ', M, N, -1, +- $ -1 ) +- WRKBL = MAX( WRKBL, 3*N+2*N* +- $ ILAENV( 1, 'DGEBRD', ' ', N, N, -1, -1 ) ) +- MAXWRK = MAX( WRKBL, BDSPAC+N ) +- MINWRK = BDSPAC + N ++ MAXWRK = N + N*ILAENV( 1, 'DGEQRF', ' ', M, N, -1, ++ $ -1 ) ++ MAXWRK = MAX( MAXWRK, 3*N+2*N* ++ $ ILAENV( 1, 'DGEBRD', ' ', N, N, -1, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC ) ++ MINWRK = BDSPAC + ELSE IF( WNTQO ) THEN + * + * Path 2 (M much larger than N, JOBZ='O') +@@ -247,9 +252,9 @@ + $ ILAENV( 1, 'DORMBR', 'QLN', N, N, N, -1 ) ) + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'DORMBR', 'PRT', N, N, N, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*N ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*N ) + MAXWRK = WRKBL + 2*N*N +- MINWRK = BDSPAC + 2*N*N + 3*N ++ MINWRK = BDSPAC + 2*N*N + 2*N + ELSE IF( WNTQS ) THEN + * + * Path 3 (M much larger than N, JOBZ='S') +@@ -263,9 +268,9 @@ + $ ILAENV( 1, 'DORMBR', 'QLN', N, N, N, -1 ) ) + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'DORMBR', 'PRT', N, N, N, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*N ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*N ) + MAXWRK = WRKBL + N*N +- MINWRK = BDSPAC + N*N + 3*N ++ MINWRK = BDSPAC + N*N + 2*N + ELSE IF( WNTQA ) THEN + * + * Path 4 (M much larger than N, JOBZ='A') +@@ -279,9 +284,9 @@ + $ ILAENV( 1, 'DORMBR', 'QLN', N, N, N, -1 ) ) + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'DORMBR', 'PRT', N, N, N, -1 ) ) +- WRKBL = MAX( WRKBL, 
BDSPAC+3*N ) +- MAXWRK = WRKBL + N*N +- MINWRK = BDSPAC + N*N + 3*N ++ WRKBL = MAX( WRKBL, BDSPAC+2*N ) ++ MAXWRK = N*N + WRKBL ++ MINWRK = BDSPAC + N*N + M + N + END IF + ELSE + * +@@ -289,53 +294,47 @@ + * + WRKBL = 3*N + ( M+N )*ILAENV( 1, 'DGEBRD', ' ', M, N, -1, + $ -1 ) +- IF( WNTQN ) THEN +- MAXWRK = MAX( WRKBL, BDSPAC+3*N ) +- MINWRK = 3*N + MAX( M, BDSPAC ) +- ELSE IF( WNTQO ) THEN ++ IF( WNTQO ) THEN + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'DORMBR', 'QLN', M, N, N, -1 ) ) + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'DORMBR', 'PRT', N, N, N, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*N ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*N+M ) + MAXWRK = WRKBL + M*N +- MINWRK = 3*N + MAX( M, N*N+BDSPAC ) ++ MINWRK = BDSPAC + N*N + 2*N + M + ELSE IF( WNTQS ) THEN +- WRKBL = MAX( WRKBL, 3*N+N* +- $ ILAENV( 1, 'DORMBR', 'QLN', M, N, N, -1 ) ) +- WRKBL = MAX( WRKBL, 3*N+N* +- $ ILAENV( 1, 'DORMBR', 'PRT', N, N, N, -1 ) ) +- MAXWRK = MAX( WRKBL, BDSPAC+3*N ) +- MINWRK = 3*N + MAX( M, BDSPAC ) ++ MAXWRK = MAX( MAXWRK, 3*N+N* ++ $ ILAENV( 1, 'DORMBR', 'QLN', M, N, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, 3*N+N* ++ $ ILAENV( 1, 'DORMBR', 'PRT', N, N, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC+2*N+M ) ++ MINWRK = BDSPAC + 2*N + M + ELSE IF( WNTQA ) THEN +- WRKBL = MAX( WRKBL, 3*N+M* +- $ ILAENV( 1, 'DORMBR', 'QLN', M, M, N, -1 ) ) +- WRKBL = MAX( WRKBL, 3*N+N* +- $ ILAENV( 1, 'DORMBR', 'PRT', N, N, N, -1 ) ) +- MAXWRK = MAX( MAXWRK, BDSPAC+3*N ) +- MINWRK = 3*N + MAX( M, BDSPAC ) ++ MAXWRK = MAX( MAXWRK, 3*N+M* ++ $ ILAENV( 1, 'DORMBR', 'QLN', M, M, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, 3*N+N* ++ $ ILAENV( 1, 'DORMBR', 'PRT', N, N, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC+2*N+M ) ++ MINWRK = BDSPAC + 2*N + M + END IF + END IF + ELSE + * + * Compute space needed for DBDSDC + * +- IF( WNTQN ) THEN +- BDSPAC = 7*M +- ELSE +- BDSPAC = 3*M*M + 4*M +- END IF ++ BDSPAC = 3*M*M + 7*M ++ BDSPAN = MAX( 12*M+4, 8*M+2+SMLSIZ*( SMLSIZ+8 ) ) + IF( N.GE.MNTHR ) THEN + IF( WNTQN ) THEN + * + * Path 1t (N 
much larger than M, JOBZ='N') + * +- WRKBL = M + M*ILAENV( 1, 'DGELQF', ' ', M, N, -1, +- $ -1 ) +- WRKBL = MAX( WRKBL, 3*M+2*M* +- $ ILAENV( 1, 'DGEBRD', ' ', M, M, -1, -1 ) ) +- MAXWRK = MAX( WRKBL, BDSPAC+M ) +- MINWRK = BDSPAC + M ++ MAXWRK = M + M*ILAENV( 1, 'DGELQF', ' ', M, N, -1, ++ $ -1 ) ++ MAXWRK = MAX( MAXWRK, 3*M+2*M* ++ $ ILAENV( 1, 'DGEBRD', ' ', M, M, -1, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC ) ++ MINWRK = BDSPAC + ELSE IF( WNTQO ) THEN + * + * Path 2t (N much larger than M, JOBZ='O') +@@ -349,9 +348,9 @@ + $ ILAENV( 1, 'DORMBR', 'QLN', M, M, M, -1 ) ) + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'DORMBR', 'PRT', M, M, M, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*M ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*M ) + MAXWRK = WRKBL + 2*M*M +- MINWRK = BDSPAC + 2*M*M + 3*M ++ MINWRK = BDSPAC + 2*M*M + 2*M + ELSE IF( WNTQS ) THEN + * + * Path 3t (N much larger than M, JOBZ='S') +@@ -365,9 +364,9 @@ + $ ILAENV( 1, 'DORMBR', 'QLN', M, M, M, -1 ) ) + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'DORMBR', 'PRT', M, M, M, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*M ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*M ) + MAXWRK = WRKBL + M*M +- MINWRK = BDSPAC + M*M + 3*M ++ MINWRK = BDSPAC + M*M + 2*M + ELSE IF( WNTQA ) THEN + * + * Path 4t (N much larger than M, JOBZ='A') +@@ -381,9 +380,9 @@ + $ ILAENV( 1, 'DORMBR', 'QLN', M, M, M, -1 ) ) + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'DORMBR', 'PRT', M, M, M, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*M ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*M ) + MAXWRK = WRKBL + M*M +- MINWRK = BDSPAC + M*M + 3*M ++ MINWRK = BDSPAC + M*M + M + N + END IF + ELSE + * +@@ -391,52 +390,47 @@ + * + WRKBL = 3*M + ( M+N )*ILAENV( 1, 'DGEBRD', ' ', M, N, -1, + $ -1 ) +- IF( WNTQN ) THEN +- MAXWRK = MAX( WRKBL, BDSPAC+3*M ) +- MINWRK = 3*M + MAX( N, BDSPAC ) +- ELSE IF( WNTQO ) THEN ++ IF( WNTQO ) THEN + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'DORMBR', 'QLN', M, M, N, -1 ) ) + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'DORMBR', 'PRT', M, N, M, -1 ) ) +- WRKBL = MAX( WRKBL, 
BDSPAC+3*M ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*M ) + MAXWRK = WRKBL + M*N +- MINWRK = 3*M + MAX( N, M*M+BDSPAC ) ++ MINWRK = BDSPAC + M*M + 2*M + N + ELSE IF( WNTQS ) THEN +- WRKBL = MAX( WRKBL, 3*M+M* +- $ ILAENV( 1, 'DORMBR', 'QLN', M, M, N, -1 ) ) +- WRKBL = MAX( WRKBL, 3*M+M* +- $ ILAENV( 1, 'DORMBR', 'PRT', M, N, M, -1 ) ) +- MAXWRK = MAX( WRKBL, BDSPAC+3*M ) +- MINWRK = 3*M + MAX( N, BDSPAC ) ++ MAXWRK = MAX( MAXWRK, 3*M+M* ++ $ ILAENV( 1, 'DORMBR', 'QLN', M, M, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, 3*M+M* ++ $ ILAENV( 1, 'DORMBR', 'PRT', M, N, M, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC+2*M ) ++ MINWRK = BDSPAC + 2*M + N + ELSE IF( WNTQA ) THEN +- WRKBL = MAX( WRKBL, 3*M+M* +- $ ILAENV( 1, 'DORMBR', 'QLN', M, M, N, -1 ) ) +- WRKBL = MAX( WRKBL, 3*M+M* +- $ ILAENV( 1, 'DORMBR', 'PRT', N, N, M, -1 ) ) +- MAXWRK = MAX( WRKBL, BDSPAC+3*M ) +- MINWRK = 3*M + MAX( N, BDSPAC ) ++ MAXWRK = MAX( MAXWRK, 3*M+M* ++ $ ILAENV( 1, 'DORMBR', 'QLN', M, M, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, 3*M+N* ++ $ ILAENV( 1, 'DORMBR', 'PRT', N, N, M, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC+2*M ) ++ MINWRK = BDSPAC + 2*M + N + END IF + END IF + END IF ++ END IF ++ IF( INFO.EQ.0 ) THEN + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -12 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -12 +- END IF ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGESDD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( M.EQ.0 .OR. 
N.EQ.0 ) THEN +- IF( LWORK.GE.1 ) +- $ WORK( 1 ) = ONE + RETURN + END IF + * +@@ -497,7 +491,7 @@ + NWORK = IE + N + * + * Perform bidiagonal SVD, computing singular values only +-* (Workspace: need N+BDSPAC) ++* (Workspace: need BDSPAN) + * + CALL DBDSDC( 'U', 'N', N, S, WORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, WORK( NWORK ), IWORK, INFO ) +@@ -512,10 +506,10 @@ + * + * WORK(IR) is LDWRKR by N + * +- IF( LWORK.GE.LDA*N+N*N+3*N+BDSPAC ) THEN ++ IF( LWORK.GE.LDA*N+4*N*N+9*N ) THEN + LDWRKR = LDA + ELSE +- LDWRKR = ( LWORK-N*N-3*N-BDSPAC ) / N ++ LDWRKR = ( LWORK-4*N*N-9*N ) / N + END IF + ITAU = IR + LDWRKR*N + NWORK = ITAU + N +@@ -557,7 +551,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in WORK(IU) and computing right + * singular vectors of bidiagonal matrix in VT +-* (Workspace: need N+N*N+BDSPAC) ++* (Workspace: need 2*N*N+BDSPAC) + * + CALL DBDSDC( 'U', 'I', N, S, WORK( IE ), WORK( IU ), N, + $ VT, LDVT, DUM, IDUM, WORK( NWORK ), IWORK, +@@ -633,7 +627,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagoal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need N+BDSPAC) ++* (Workspace: need N*N+BDSPAC) + * + CALL DBDSDC( 'U', 'I', N, S, WORK( IE ), U, LDU, VT, + $ LDVT, DUM, IDUM, WORK( NWORK ), IWORK, +@@ -681,7 +675,7 @@ + CALL DLACPY( 'L', M, N, A, LDA, U, LDU ) + * + * Generate Q in U +-* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) ++* (Workspace: need N*N+N+M, prefer N*N+N+M*NB) + CALL DORGQR( M, M, N, U, LDU, WORK( ITAU ), + $ WORK( NWORK ), LWORK-NWORK+1, IERR ) + * +@@ -703,7 +697,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in WORK(IU) and computing right + * singular vectors of bidiagonal matrix in VT +-* (Workspace: need N+N*N+BDSPAC) ++* (Workspace: need N*N+BDSPAC) + * + CALL DBDSDC( 'U', 'I', N, S, WORK( IE ), WORK( IU ), N, + $ VT, LDVT, DUM, IDUM, WORK( NWORK ), IWORK, +@@ -754,13 
+748,13 @@ + IF( WNTQN ) THEN + * + * Perform bidiagonal SVD, only computing singular values +-* (Workspace: need N+BDSPAC) ++* (Workspace: need BDSPAN) + * + CALL DBDSDC( 'U', 'N', N, S, WORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, WORK( NWORK ), IWORK, INFO ) + ELSE IF( WNTQO ) THEN + IU = NWORK +- IF( LWORK.GE.M*N+3*N+BDSPAC ) THEN ++ IF( LWORK.GE.M*N+3*N*N+9*N ) THEN + * + * WORK( IU ) is M by N + * +@@ -785,7 +779,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in WORK(IU) and computing right + * singular vectors of bidiagonal matrix in VT +-* (Workspace: need N+N*N+BDSPAC) ++* (Workspace: need N*N+BDSPAC) + * + CALL DBDSDC( 'U', 'I', N, S, WORK( IE ), WORK( IU ), + $ LDWRKU, VT, LDVT, DUM, IDUM, WORK( NWORK ), +@@ -798,7 +792,7 @@ + $ WORK( ITAUP ), VT, LDVT, WORK( NWORK ), + $ LWORK-NWORK+1, IERR ) + * +- IF( LWORK.GE.M*N+3*N+BDSPAC ) THEN ++ IF( LWORK.GE.M*N+3*N*N+9*N ) THEN + * + * Overwrite WORK(IU) by left singular vectors of A + * (Workspace: need N*N+2*N, prefer N*N+N+N*NB) +@@ -838,7 +832,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need N+BDSPAC) ++* (Workspace: need BDSPAC) + * + CALL DLASET( 'F', M, N, ZERO, ZERO, U, LDU ) + CALL DBDSDC( 'U', 'I', N, S, WORK( IE ), U, LDU, VT, +@@ -855,12 +849,12 @@ + CALL DORMBR( 'P', 'R', 'T', N, N, N, A, LDA, + $ WORK( ITAUP ), VT, LDVT, WORK( NWORK ), + $ LWORK-NWORK+1, IERR ) +- ELSE IF( WNTQA ) THEN ++ ELSE + * + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need N+BDSPAC) ++* (Workspace: need BDSPAC) + * + CALL DLASET( 'F', M, M, ZERO, ZERO, U, LDU ) + CALL DBDSDC( 'U', 'I', N, S, WORK( IE ), U, LDU, VT, +@@ -925,7 +919,7 @@ + NWORK = IE + M + * + * Perform bidiagonal SVD, computing singular values only +-* 
(Workspace: need M+BDSPAC) ++* (Workspace: need BDSPAN) + * + CALL DBDSDC( 'U', 'N', M, S, WORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, WORK( NWORK ), IWORK, INFO ) +@@ -941,7 +935,7 @@ + * IVT is M by M + * + IL = IVT + M*M +- IF( LWORK.GE.M*N+M*M+3*M+BDSPAC ) THEN ++ IF( LWORK.GE.M*N+4*M*M+9*M ) THEN + * + * WORK(IL) is M by N + * +@@ -986,7 +980,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U, and computing right singular + * vectors of bidiagonal matrix in WORK(IVT) +-* (Workspace: need M+M*M+BDSPAC) ++* (Workspace: need 2*M*M+BDSPAC) + * + CALL DBDSDC( 'U', 'I', M, S, WORK( IE ), U, LDU, + $ WORK( IVT ), M, DUM, IDUM, WORK( NWORK ), +@@ -1061,7 +1055,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need M+BDSPAC) ++* (Workspace: need M*M+BDSPAC) + * + CALL DBDSDC( 'U', 'I', M, S, WORK( IE ), U, LDU, VT, + $ LDVT, DUM, IDUM, WORK( NWORK ), IWORK, +@@ -1108,7 +1102,7 @@ + CALL DLACPY( 'U', M, N, A, LDA, VT, LDVT ) + * + * Generate Q in VT +-* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) ++* (Workspace: need M*M+M+N, prefer M*M+M+N*NB) + * + CALL DORGLQ( N, N, M, VT, LDVT, WORK( ITAU ), + $ WORK( NWORK ), LWORK-NWORK+1, IERR ) +@@ -1131,7 +1125,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in WORK(IVT) +-* (Workspace: need M+M*M+BDSPAC) ++* (Workspace: need M*M+BDSPAC) + * + CALL DBDSDC( 'U', 'I', M, S, WORK( IE ), U, LDU, + $ WORK( IVT ), LDWKVT, DUM, IDUM, +@@ -1182,14 +1176,14 @@ + IF( WNTQN ) THEN + * + * Perform bidiagonal SVD, only computing singular values +-* (Workspace: need M+BDSPAC) ++* (Workspace: need BDSPAN) + * + CALL DBDSDC( 'L', 'N', M, S, WORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, WORK( NWORK ), IWORK, INFO ) + ELSE IF( WNTQO ) THEN + LDWKVT = M + IVT = NWORK +- 
IF( LWORK.GE.M*N+3*M+BDSPAC ) THEN ++ IF( LWORK.GE.M*N+3*M*M+9*M ) THEN + * + * WORK( IVT ) is M by N + * +@@ -1224,7 +1218,7 @@ + $ WORK( ITAUQ ), U, LDU, WORK( NWORK ), + $ LWORK-NWORK+1, IERR ) + * +- IF( LWORK.GE.M*N+3*M+BDSPAC ) THEN ++ IF( LWORK.GE.M*N+3*M*M+9*M ) THEN + * + * Overwrite WORK(IVT) by left singular vectors of A + * (Workspace: need M*M+2*M, prefer M*M+M+M*NB) +@@ -1263,7 +1257,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need M+BDSPAC) ++* (Workspace: need BDSPAC) + * + CALL DLASET( 'F', M, N, ZERO, ZERO, VT, LDVT ) + CALL DBDSDC( 'L', 'I', M, S, WORK( IE ), U, LDU, VT, +@@ -1280,12 +1274,12 @@ + CALL DORMBR( 'P', 'R', 'T', M, N, M, A, LDA, + $ WORK( ITAUP ), VT, LDVT, WORK( NWORK ), + $ LWORK-NWORK+1, IERR ) +- ELSE IF( WNTQA ) THEN ++ ELSE + * + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need M+BDSPAC) ++* (Workspace: need BDSPAC) + * + CALL DLASET( 'F', N, N, ZERO, ZERO, VT, LDVT ) + CALL DBDSDC( 'L', 'I', M, S, WORK( IE ), U, LDU, VT, +@@ -1319,9 +1313,15 @@ + IF( ANRM.GT.BIGNUM ) + $ CALL DLASCL( 'G', 0, 0, BIGNUM, ANRM, MINMN, 1, S, MINMN, + $ IERR ) ++ IF( INFO.NE.0 .AND. ANRM.GT.BIGNUM ) ++ $ CALL DLASCL( 'G', 0, 0, BIGNUM, ANRM, MINMN-1, 1, WORK( 2 ), ++ $ MINMN, IERR ) + IF( ANRM.LT.SMLNUM ) + $ CALL DLASCL( 'G', 0, 0, SMLNUM, ANRM, MINMN, 1, S, MINMN, + $ IERR ) ++ IF( INFO.NE.0 .AND. ANRM.LT.SMLNUM ) ++ $ CALL DLASCL( 'G', 0, 0, SMLNUM, ANRM, MINMN-1, 1, WORK( 2 ), ++ $ MINMN, IERR ) + END IF + * + * Return optimal workspace in WORK(1) +diff -uNr LAPACK.orig/SRC/dgesvd.f LAPACK/SRC/dgesvd.f +--- LAPACK.orig/SRC/dgesvd.f Thu Nov 4 14:24:44 1999 ++++ LAPACK/SRC/dgesvd.f Fri May 25 16:08:25 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. 
of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBU, JOBVT +@@ -118,10 +119,9 @@ + * LWORK >= MAX(3*MIN(M,N)+MAX(M,N),5*MIN(M,N)). + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * INFO (output) INTEGER + * = 0: successful exit. +@@ -134,12 +134,14 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, WNTUA, WNTUAS, WNTUN, WNTUO, WNTUS, +- $ WNTVA, WNTVAS, WNTVN, WNTVO, WNTVS ++ LOGICAL WNTUA, WNTUAS, WNTUN, WNTUO, WNTUS, WNTVA, ++ $ WNTVAS, WNTVN, WNTVO, WNTVS + INTEGER BDSPAC, BLK, CHUNK, I, IE, IERR, IR, ISCL, + $ ITAU, ITAUP, ITAUQ, IU, IWORK, LDWRKR, LDWRKU, + $ MAXWRK, MINMN, MINWRK, MNTHR, NCU, NCVT, NRU, +@@ -181,7 +183,7 @@ + WNTVO = LSAME( JOBVT, 'O' ) + WNTVN = LSAME( JOBVT, 'N' ) + MINWRK = 1 +- LQUERY = ( LWORK.EQ.-1 ) ++ MAXWRK = 1 + * + IF( .NOT.( WNTUA .OR. WNTUS .OR. WNTUO .OR. WNTUN ) ) THEN + INFO = -1 +@@ -208,8 +210,7 @@ + * NB refers to the optimal block size for the immediately + * following subroutine, as returned by ILAENV.) + * +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) .AND. M.GT.0 .AND. +- $ N.GT.0 ) THEN ++ IF( INFO.EQ.0 .AND. M.GT.0 .AND. 
N.GT.0 ) THEN + IF( M.GE.N ) THEN + * + * Compute space needed for DBDSQR +@@ -557,24 +558,22 @@ + MAXWRK = MAX( MAXWRK, MINWRK ) + END IF + END IF ++ END IF ++ IF( INFO.EQ.0 ) THEN + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -13 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -13 +- END IF ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGESVD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( M.EQ.0 .OR. N.EQ.0 ) THEN +- IF( LWORK.GE.1 ) +- $ WORK( 1 ) = ONE + RETURN + END IF + * +diff -uNr LAPACK.orig/SRC/dggbak.f LAPACK/SRC/dggbak.f +--- LAPACK.orig/SRC/dggbak.f Thu Nov 4 14:24:45 1999 ++++ LAPACK/SRC/dggbak.f Fri May 25 16:08:56 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* February 1, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOB, SIDE +@@ -108,10 +108,15 @@ + INFO = -3 + ELSE IF( ILO.LT.1 ) THEN + INFO = -4 +- ELSE IF( IHI.LT.ILO .OR. IHI.GT.MAX( 1, N ) ) THEN ++ ELSE IF( N.EQ.0 .AND. IHI.EQ.0 .AND. ILO.NE.1 ) THEN ++ INFO = -4 ++ ELSE IF( N.GT.0 .AND. ( IHI.LT.ILO .OR. IHI.GT.MAX( 1, N ) ) ) ++ $ THEN ++ INFO = -5 ++ ELSE IF( N.EQ.0 .AND. ILO.EQ.1 .AND. IHI.NE.0 ) THEN + INFO = -5 + ELSE IF( M.LT.0 ) THEN +- INFO = -6 ++ INFO = -8 + ELSE IF( LDV.LT.MAX( 1, N ) ) THEN + INFO = -10 + END IF +diff -uNr LAPACK.orig/SRC/dggbal.f LAPACK/SRC/dggbal.f +--- LAPACK.orig/SRC/dggbal.f Thu Nov 4 14:25:44 1999 ++++ LAPACK/SRC/dggbal.f Fri May 25 16:09:17 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 12, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER JOB +@@ -141,7 +141,7 @@ + ELSE IF( LDA.LT.MAX( 1, N ) ) THEN + INFO = -4 + ELSE IF( LDB.LT.MAX( 1, N ) ) THEN +- INFO = -5 ++ INFO = -6 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGGBAL', -INFO ) +@@ -188,8 +188,8 @@ + IF( L.NE.1 ) + $ GO TO 30 + * +- RSCALE( 1 ) = 1 +- LSCALE( 1 ) = 1 ++ RSCALE( 1 ) = ONE ++ LSCALE( 1 ) = ONE + GO TO 190 + * + 30 CONTINUE +@@ -247,7 +247,7 @@ + * Permute rows M and I + * + 160 CONTINUE +- LSCALE( M ) = I ++ LSCALE( M ) = DBLE( I ) + IF( I.EQ.M ) + $ GO TO 170 + CALL DSWAP( N-K+1, A( I, K ), LDA, A( M, K ), LDA ) +@@ -256,7 +256,7 @@ + * Permute columns M and J + * + 170 CONTINUE +- RSCALE( M ) = J ++ RSCALE( M ) = DBLE( J ) + IF( J.EQ.M ) + $ GO TO 180 + CALL DSWAP( L, A( 1, J ), 1, A( 1, M ), 1 ) +@@ -424,7 +424,7 @@ + DO 360 I = ILO, IHI + IRAB = IDAMAX( N-ILO+1, A( I, ILO ), LDA ) + RAB = ABS( A( I, IRAB+ILO-1 ) ) +- IRAB = IDAMAX( N-ILO+1, B( I, ILO ), LDA ) ++ IRAB = IDAMAX( N-ILO+1, B( I, ILO ), LDB ) + RAB = MAX( RAB, ABS( B( I, IRAB+ILO-1 ) ) ) + LRAB = INT( LOG10( RAB+SFMIN ) / BASL+ONE ) + IR = LSCALE( I ) + SIGN( HALF, LSCALE( I ) ) +diff -uNr LAPACK.orig/SRC/dgges.f LAPACK/SRC/dgges.f +--- LAPACK.orig/SRC/dgges.f Thu Nov 4 14:26:18 1999 ++++ LAPACK/SRC/dgges.f Fri May 25 16:09:38 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR, SORT +@@ -158,10 +159,9 @@ + * LWORK (input) INTEGER + * The dimension of the array WORK. LWORK >= 8*N+16. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. 
The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * BWORK (workspace) LOGICAL array, dimension (N) + * Not referenced if SORT = 'N'. +@@ -184,12 +184,14 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) + * .. + * .. Local Scalars .. + LOGICAL CURSL, ILASCL, ILBSCL, ILVSL, ILVSR, LASTSL, +- $ LQUERY, LST2SL, WANTST ++ $ LST2SL, WANTST + INTEGER I, ICOLS, IERR, IHI, IJOBVL, IJOBVR, ILEFT, + $ ILO, IP, IRIGHT, IROWS, ITAU, IWRK, MAXWRK, + $ MINWRK +@@ -245,7 +247,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( IJOBVL.LE.0 ) THEN + INFO = -1 + ELSE IF( IJOBVR.LE.0 ) THEN +@@ -272,7 +273,7 @@ + * following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MINWRK = 7*( N+1 ) + 16 + MAXWRK = 7*( N+1 ) + N*ILAENV( 1, 'DGEQRF', ' ', N, 1, N, 0 ) + + $ 16 +@@ -281,19 +282,18 @@ + $ ILAENV( 1, 'DORGQR', ' ', N, 1, N, -1 ) ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -19 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) +- $ INFO = -19 ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGGES ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/dggesx.f LAPACK/SRC/dggesx.f +--- LAPACK.orig/SRC/dggesx.f Thu Nov 4 14:26:18 1999 ++++ LAPACK/SRC/dggesx.f Fri May 25 16:09:56 2001 +@@ -7,6 +7,7 @@ + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Do WS calculations if LWORK = -1 (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR, SENSE, SORT +@@ -185,6 +186,10 @@ + * If SENSE = 'E', 'V', or 'B', + * LWORK >= MAX( 8*(N+1)+16, 2*SDIM*(N-SDIM) ). + * ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. ++* + * IWORK (workspace) INTEGER array, dimension (LIWORK) + * Not referenced if SENSE = 'N'. + * +@@ -227,6 +232,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) + * .. +@@ -330,7 +337,7 @@ + * following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. LWORK.GE.1 ) THEN ++ IF( INFO.EQ.0 ) THEN + MINWRK = 8*( N+1 ) + 16 + MAXWRK = 7*( N+1 ) + N*ILAENV( 1, 'DGEQRF', ' ', N, 1, N, 0 ) + + $ 16 +@@ -338,7 +345,15 @@ + MAXWRK = MAX( MAXWRK, 8*( N+1 )+N* + $ ILAENV( 1, 'DORGQR', ' ', N, 1, N, -1 )+16 ) + END IF ++* ++* Estimate the workspace needed by DTGSEN. ++* ++ IF( WANTST ) THEN ++ MAXWRK = MAX( MAXWRK, 2*N+( N*N+1 ) / 2 ) ++ END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -22 + END IF + IF( .NOT.WANTSN ) THEN + LIWMIN = 1 +@@ -346,21 +361,19 @@ + LIWMIN = N + 6 + END IF + IWORK( 1 ) = LIWMIN +-* +- IF( INFO.EQ.0 .AND. LWORK.LT.MINWRK ) THEN +- INFO = -22 +- ELSE IF( INFO.EQ.0 .AND. IJOB.GE.1 ) THEN ++ IF( INFO.EQ.0 .AND. 
IJOB.GE.1 ) THEN + IF( LIWORK.LT.LIWMIN ) + $ INFO = -24 + END IF + * ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGGESX', -INFO ) + RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/dggev.f LAPACK/SRC/dggev.f +--- LAPACK.orig/SRC/dggev.f Thu Nov 4 14:26:18 1999 ++++ LAPACK/SRC/dggev.f Fri May 25 16:10:14 2001 +@@ -5,6 +5,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -123,10 +124,9 @@ + * The dimension of the array WORK. LWORK >= max(1,8*N). + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * INFO (output) INTEGER + * = 0: successful exit +@@ -141,11 +141,13 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) + * .. + * .. Local Scalars .. +- LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, LQUERY ++ LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR + CHARACTER CHTEMP + INTEGER ICOLS, IERR, IHI, IJOBVL, IJOBVR, ILEFT, ILO, + $ IN, IRIGHT, IROWS, ITAU, IWRK, JC, JR, MAXWRK, +@@ -157,8 +159,9 @@ + LOGICAL LDUMMA( 1 ) + * .. + * .. External Subroutines .. 
+- EXTERNAL DGEQRF, DGGBAK, DGGBAL, DGGHRD, DHGEQZ, DLACPY, +- $ DLASCL, DLASET, DORGQR, DORMQR, DTGEVC, XERBLA ++ EXTERNAL DGEQRF, DGGBAK, DGGBAL, DGGHRD, DHGEQZ, DLABAD, ++ $ DLACPY, DLASCL, DLASET, DORGQR, DORMQR, DTGEVC, ++ $ XERBLA + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -199,7 +202,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( IJOBVL.LE.0 ) THEN + INFO = -1 + ELSE IF( IJOBVR.LE.0 ) THEN +@@ -225,24 +227,22 @@ + * computed assuming ILO = 1 and IHI = N, the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 7*N + N*ILAENV( 1, 'DGEQRF', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 8*N ) + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -16 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) +- $ INFO = -16 ++* Quick returns + * + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGGEV ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/dggevx.f LAPACK/SRC/dggevx.f +--- LAPACK.orig/SRC/dggevx.f Thu Nov 4 14:26:18 1999 ++++ LAPACK/SRC/dggevx.f Fri May 25 16:11:31 2001 +@@ -7,6 +7,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER BALANC, JOBVL, JOBVR, SENSE +@@ -212,10 +213,9 @@ + * If SENSE = 'E', LWORK >= 12*N. + * If SENSE = 'V' or 'B', LWORK >= 2*N*N+12*N+16. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. 
The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * IWORK (workspace) INTEGER array, dimension (N+6) + * If SENSE = 'E', IWORK is not referenced. +@@ -262,12 +262,14 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) + * .. + * .. Local Scalars .. +- LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, LQUERY, PAIR, +- $ WANTSB, WANTSE, WANTSN, WANTSV ++ LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, PAIR, WANTSB, ++ $ WANTSE, WANTSN, WANTSV + CHARACTER CHTEMP + INTEGER I, ICOLS, IERR, IJOBVL, IJOBVR, IN, IROWS, + $ ITAU, IWRK, IWRK1, J, JC, JR, M, MAXWRK, +@@ -279,9 +281,9 @@ + LOGICAL LDUMMA( 1 ) + * .. + * .. External Subroutines .. +- EXTERNAL DGEQRF, DGGBAK, DGGBAL, DGGHRD, DHGEQZ, DLACPY, +- $ DLASCL, DLASET, DORGQR, DORMQR, DTGEVC, DTGSNA, +- $ XERBLA ++ EXTERNAL DGEQRF, DGGBAK, DGGBAL, DGGHRD, DHGEQZ, DLABAD, ++ $ DLACPY, DLASCL, DLASET, DORGQR, DORMQR, DTGEVC, ++ $ DTGSNA, XERBLA + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -327,7 +329,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( .NOT.( LSAME( BALANC, 'N' ) .OR. LSAME( BALANC, + $ 'S' ) .OR. LSAME( BALANC, 'P' ) .OR. LSAME( BALANC, 'B' ) ) ) + $ THEN +@@ -360,7 +361,7 @@ + * computed assuming ILO = 1 and IHI = N, the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 5*N + N*ILAENV( 1, 'DGEQRF', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 6*N ) + IF( WANTSE ) THEN +@@ -370,24 +371,20 @@ + MAXWRK = MAX( MAXWRK, 2*N*N+12*N+16 ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -26 + END IF + * +- IF( LWORK.LT.MINWRK .AND. 
.NOT.LQUERY ) THEN +- INFO = -26 +- END IF ++* Quick returns + * + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGGEVX', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) + $ RETURN +-* + * + * Get machine constants + * +diff -uNr LAPACK.orig/SRC/dgghrd.f LAPACK/SRC/dgghrd.f +--- LAPACK.orig/SRC/dgghrd.f Thu Nov 4 14:25:43 1999 ++++ LAPACK/SRC/dgghrd.f Fri May 25 16:11:50 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ +@@ -20,16 +20,32 @@ + * + * DGGHRD reduces a pair of real matrices (A,B) to generalized upper + * Hessenberg form using orthogonal transformations, where A is a +-* general matrix and B is upper triangular: Q' * A * Z = H and +-* Q' * B * Z = T, where H is upper Hessenberg, T is upper triangular, +-* and Q and Z are orthogonal, and ' means transpose. ++* general matrix and B is upper triangular. The form of the ++* generalized eigenvalue problem is ++* A*x = lambda*B*x, ++* and B is typically made upper triangular by computing its QR ++* factorization and moving the orthogonal matrix Q to the left side ++* of the equation. ++* ++* This subroutine simultaneously reduces A to a Hessenberg matrix H: ++* Q**T*A*Z = H ++* and transforms B to another upper triangular matrix T: ++* Q**T*B*Z = T ++* in order to reduce the problem to its standard form ++* H*y = lambda*T*y ++* where y = Z**T*x. + * + * The orthogonal matrices Q and Z are determined as products of Givens + * rotations. 
They may either be formed explicitly, or they may be + * postmultiplied into input matrices Q1 and Z1, so that + * +-* Q1 * A * Z1' = (Q1*Q) * H * (Z1*Z)' +-* Q1 * B * Z1' = (Q1*Q) * T * (Z1*Z)' ++* Q1 * A * Z1**T = (Q1*Q) * H * (Z1*Z)**T ++* ++* Q1 * B * Z1**T = (Q1*Q) * T * (Z1*Z)**T ++* ++* If Q1 is the orthogonal matrix from the QR factorization of B in the ++* original equation A*x = lambda*B*x, then DGGHRD reduces the original ++* problem to generalized Hessenberg form. + * + * Arguments + * ========= +@@ -53,10 +69,11 @@ + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set +-* by a previous call to DGGBAL; otherwise they should be set +-* to 1 and N respectively. ++* ILO and IHI mark the rows and columns of A which are to be ++* reduced. It is assumed that A is already upper triangular ++* in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are ++* normally set by a previous call to SGGBAL; otherwise they ++* should be set to 1 and N respectively. + * 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. + * + * A (input/output) DOUBLE PRECISION array, dimension (LDA, N) +@@ -70,33 +87,28 @@ + * + * B (input/output) DOUBLE PRECISION array, dimension (LDB, N) + * On entry, the N-by-N upper triangular matrix B. +-* On exit, the upper triangular matrix T = Q' B Z. The ++* On exit, the upper triangular matrix T = Q**T B Z. The + * elements below the diagonal are set to zero. + * + * LDB (input) INTEGER + * The leading dimension of the array B. LDB >= max(1,N). + * + * Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) +-* If COMPQ='N': Q is not referenced. +-* If COMPQ='I': on entry, Q need not be set, and on exit it +-* contains the orthogonal matrix Q, where Q' +-* is the product of the Givens transformations +-* which are applied to A and B on the left. 
+-* If COMPQ='V': on entry, Q must contain an orthogonal matrix +-* Q1, and on exit this is overwritten by Q1*Q. ++* On entry, if COMPQ = 'V', the orthogonal matrix Q1, ++* typically from the QR factorization of B. ++* On exit, if COMPQ='I', the orthogonal matrix Q, and if ++* COMPQ = 'V', the product Q1*Q. ++* Not referenced if COMPQ='N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. + * LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. + * + * Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N) +-* If COMPZ='N': Z is not referenced. +-* If COMPZ='I': on entry, Z need not be set, and on exit it +-* contains the orthogonal matrix Z, which is +-* the product of the Givens transformations +-* which are applied to A and B on the right. +-* If COMPZ='V': on entry, Z must contain an orthogonal matrix +-* Z1, and on exit this is overwritten by Z1*Z. ++* On entry, if COMPZ = 'V', the orthogonal matrix Z1. ++* On exit, if COMPZ='I', the orthogonal matrix Z, and if ++* COMPZ = 'V', the product Z1*Z. ++* Not referenced if COMPZ='N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. +diff -uNr LAPACK.orig/SRC/dhgeqz.f LAPACK/SRC/dhgeqz.f +--- LAPACK.orig/SRC/dhgeqz.f Thu Nov 4 14:24:45 1999 ++++ LAPACK/SRC/dhgeqz.f Fri May 25 16:12:11 2001 +@@ -1,56 +1,75 @@ +- SUBROUTINE DHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, A, LDA, B, LDB, ++ SUBROUTINE DHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, H, LDH, T, LDT, + $ ALPHAR, ALPHAI, BETA, Q, LDQ, Z, LDZ, WORK, + $ LWORK, INFO ) + * + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ, JOB +- INTEGER IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, LWORK, N ++ INTEGER IHI, ILO, INFO, LDH, LDQ, LDT, LDZ, LWORK, N + * .. + * .. Array Arguments .. 
+- DOUBLE PRECISION A( LDA, * ), ALPHAI( * ), ALPHAR( * ), +- $ B( LDB, * ), BETA( * ), Q( LDQ, * ), WORK( * ), +- $ Z( LDZ, * ) ++ DOUBLE PRECISION ALPHAI( * ), ALPHAR( * ), BETA( * ), ++ $ H( LDH, * ), Q( LDQ, * ), T( LDT, * ), ++ $ WORK( * ), Z( LDZ, * ) + * .. + * + * Purpose + * ======= + * +-* DHGEQZ implements a single-/double-shift version of the QZ method for +-* finding the generalized eigenvalues +-* +-* w(j)=(ALPHAR(j) + i*ALPHAI(j))/BETAR(j) of the equation +-* +-* det( A - w(i) B ) = 0 +-* +-* In addition, the pair A,B may be reduced to generalized Schur form: +-* B is upper triangular, and A is block upper triangular, where the +-* diagonal blocks are either 1-by-1 or 2-by-2, the 2-by-2 blocks having +-* complex generalized eigenvalues (see the description of the argument +-* JOB.) +-* +-* If JOB='S', then the pair (A,B) is simultaneously reduced to Schur +-* form by applying one orthogonal tranformation (usually called Q) on +-* the left and another (usually called Z) on the right. The 2-by-2 +-* upper-triangular diagonal blocks of B corresponding to 2-by-2 blocks +-* of A will be reduced to positive diagonal matrices. (I.e., +-* if A(j+1,j) is non-zero, then B(j+1,j)=B(j,j+1)=0 and B(j,j) and +-* B(j+1,j+1) will be positive.) +-* +-* If JOB='E', then at each iteration, the same transformations +-* are computed, but they are only applied to those parts of A and B +-* which are needed to compute ALPHAR, ALPHAI, and BETAR. +-* +-* If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the orthogonal +-* transformations used to reduce (A,B) are accumulated into the arrays +-* Q and Z s.t.: +-* +-* Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)* +-* Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)* ++* DHGEQZ computes the eigenvalues of a real matrix pair (H,T), ++* where H is an upper Hessenberg matrix and T is upper triangular, ++* using the double-shift QZ method. 
++* Matrix pairs of this type are produced by the reduction to ++* generalized upper Hessenberg form of a real matrix pair (A,B): ++* ++* A = Q1*H*Z1**T, B = Q1*T*Z1**T, ++* ++* as computed by DGGHRD. ++* ++* If JOB='S', then the Hessenberg-triangular pair (H,T) is ++* also reduced to generalized Schur form, ++* ++* H = Q*S*Z**T, T = Q*P*Z**T, ++* ++* where Q and Z are orthogonal matrices, P is an upper triangular ++* matrix, and S is a quasi-triangular matrix with 1-by-1 and 2-by-2 ++* diagonal blocks. ++* ++* The 1-by-1 blocks correspond to real eigenvalues of the matrix pair ++* (H,T) and the 2-by-2 blocks correspond to complex conjugate pairs of ++* eigenvalues. ++* ++* Additionally, the 2-by-2 upper triangular diagonal blocks of P ++* corresponding to 2-by-2 blocks of S are reduced to positive diagonal ++* form, i.e., if S(j+1,j) is non-zero, then P(j+1,j) = P(j,j+1) = 0, ++* P(j,j) > 0, and P(j+1,j+1) > 0. ++* ++* Optionally, the orthogonal matrix Q from the generalized Schur ++* factorization may be postmultiplied into an input matrix Q1, and the ++* orthogonal matrix Z may be postmultiplied into an input matrix Z1. ++* If Q1 and Z1 are the orthogonal matrices from DGGHRD that reduced ++* the matrix pair (A,B) to generalized upper Hessenberg form, then the ++* output matrices Q1*Q and Z1*Z are the orthogonal factors from the ++* generalized Schur factorization of (A,B): ++* ++* A = (Q1*Q)*S*(Z1*Z)**T, B = (Q1*Q)*P*(Z1*Z)**T. ++* ++* To avoid overflow, eigenvalues of the matrix pair (H,T) (equivalently, ++* of (A,B)) are computed as a pair of values (alpha,beta), where alpha is ++* complex and beta real. ++* If beta is nonzero, lambda = alpha / beta is an eigenvalue of the ++* generalized nonsymmetric eigenvalue problem (GNEP) ++* A*x = lambda*B*x ++* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the ++* alternate form of the GNEP ++* mu*A*y = B*y. 
++* Real eigenvalues can be read directly from the generalized Schur ++* form: ++* alpha = S(i,i), beta = P(i,i). + * + * Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix + * Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), +@@ -60,114 +79,98 @@ + * ========= + * + * JOB (input) CHARACTER*1 +-* = 'E': compute only ALPHAR, ALPHAI, and BETA. A and B will +-* not necessarily be put into generalized Schur form. +-* = 'S': put A and B into generalized Schur form, as well +-* as computing ALPHAR, ALPHAI, and BETA. ++* = 'E': Compute eigenvalues only; ++* = 'S': Compute eigenvalues and the Schur form. + * + * COMPQ (input) CHARACTER*1 +-* = 'N': do not modify Q. +-* = 'V': multiply the array Q on the right by the transpose of +-* the orthogonal tranformation that is applied to the +-* left side of A and B to reduce them to Schur form. +-* = 'I': like COMPQ='V', except that Q will be initialized to +-* the identity first. ++* = 'N': Left Schur vectors (Q) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Q ++* of left Schur vectors of (H,T) is returned; ++* = 'V': Q must contain an orthogonal matrix Q1 on entry and ++* the product Q1*Q is returned. + * + * COMPZ (input) CHARACTER*1 +-* = 'N': do not modify Z. +-* = 'V': multiply the array Z on the right by the orthogonal +-* tranformation that is applied to the right side of +-* A and B to reduce them to Schur form. +-* = 'I': like COMPZ='V', except that Z will be initialized to +-* the identity first. ++* = 'N': Right Schur vectors (Z) are not computed; ++* = 'I': Z is initialized to the unit matrix and the matrix Z ++* of right Schur vectors of (H,T) is returned; ++* = 'V': Z must contain an orthogonal matrix Z1 on entry and ++* the product Z1*Z is returned. + * + * N (input) INTEGER +-* The order of the matrices A, B, Q, and Z. N >= 0. ++* The order of the matrices H, T, Q, and Z. N >= 0. 
+ * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. +-* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. +-* +-* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) +-* On entry, the N-by-N upper Hessenberg matrix A. Elements +-* below the subdiagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to generalized Schur form. +-* If JOB='E', then on exit A will have been destroyed. +-* The diagonal blocks will be correct, but the off-diagonal +-* portion will be meaningless. +-* +-* LDA (input) INTEGER +-* The leading dimension of the array A. LDA >= max( 1, N ). +-* +-* B (input/output) DOUBLE PRECISION array, dimension (LDB, N) +-* On entry, the N-by-N upper triangular matrix B. Elements +-* below the diagonal must be zero. 2-by-2 blocks in B +-* corresponding to 2-by-2 blocks in A will be reduced to +-* positive diagonal form. (I.e., if A(j+1,j) is non-zero, +-* then B(j+1,j)=B(j,j+1)=0 and B(j,j) and B(j+1,j+1) will be +-* positive.) +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to Schur form. +-* If JOB='E', then on exit B will have been destroyed. +-* Elements corresponding to diagonal blocks of A will be +-* correct, but the off-diagonal portion will be meaningless. ++* ILO and IHI mark the rows and columns of H which are in ++* Hessenberg form. It is assumed that A is already upper ++* triangular in rows and columns 1:ILO-1 and IHI+1:N. ++* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. ++* ++* H (input/output) DOUBLE PRECISION array, dimension (LDH, N) ++* On entry, the N-by-N upper Hessenberg matrix H. 
++* On exit, if JOB = 'S', H contains the upper quasi-triangular ++* matrix S from the generalized Schur factorization; ++* 2-by-2 diagonal blocks (corresponding to complex conjugate ++* pairs of eigenvalues) are returned in standard form, with ++* H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. ++* If JOB = 'E', the diagonal blocks of H match those of S, but ++* the rest of H is unspecified. ++* ++* LDH (input) INTEGER ++* The leading dimension of the array H. LDH >= max( 1, N ). ++* ++* T (input/output) DOUBLE PRECISION array, dimension (LDT, N) ++* On entry, the N-by-N upper triangular matrix T. ++* On exit, if JOB = 'S', T contains the upper triangular ++* matrix P from the generalized Schur factorization; ++* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks of S ++* are reduced to positive diagonal form, i.e., if H(j+1,j) is ++* non-zero, then T(j+1,j) = T(j,j+1) = 0, T(j,j) > 0, and ++* T(j+1,j+1) > 0. ++* If JOB = 'E', the diagonal blocks of T match those of P, but ++* the rest of T is unspecified. + * +-* LDB (input) INTEGER +-* The leading dimension of the array B. LDB >= max( 1, N ). ++* LDT (input) INTEGER ++* The leading dimension of the array T. LDT >= max( 1, N ). + * + * ALPHAR (output) DOUBLE PRECISION array, dimension (N) +-* ALPHAR(1:N) will be set to real parts of the diagonal +-* elements of A that would result from reducing A and B to +-* Schur form and then further reducing them both to triangular +-* form using unitary transformations s.t. the diagonal of B +-* was non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then ALPHAR(j)=A(j,j). +-* Note that the (real or complex) values +-* (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the +-* generalized eigenvalues of the matrix pencil A - wB. ++* The real parts of each scalar alpha defining an eigenvalue ++* of GNEP. 
+ * + * ALPHAI (output) DOUBLE PRECISION array, dimension (N) +-* ALPHAI(1:N) will be set to imaginary parts of the diagonal +-* elements of A that would result from reducing A and B to +-* Schur form and then further reducing them both to triangular +-* form using unitary transformations s.t. the diagonal of B +-* was non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then ALPHAR(j)=0. +-* Note that the (real or complex) values +-* (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the +-* generalized eigenvalues of the matrix pencil A - wB. ++* The imaginary parts of each scalar alpha defining an ++* eigenvalue of GNEP. ++* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if ++* positive, then the j-th and (j+1)-st eigenvalues are a ++* complex conjugate pair, with ALPHAI(j+1) = -ALPHAI(j). + * + * BETA (output) DOUBLE PRECISION array, dimension (N) +-* BETA(1:N) will be set to the (real) diagonal elements of B +-* that would result from reducing A and B to Schur form and +-* then further reducing them both to triangular form using +-* unitary transformations s.t. the diagonal of B was +-* non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then BETA(j)=B(j,j). +-* Note that the (real or complex) values +-* (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the +-* generalized eigenvalues of the matrix pencil A - wB. +-* (Note that BETA(1:N) will always be non-negative, and no +-* BETAI is necessary.) ++* The scalars beta that define the eigenvalues of GNEP. ++* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and ++* beta = BETA(j) represent the j-th eigenvalue of the matrix ++* pair (A,B), in one of the forms lambda = alpha/beta or ++* mu = beta/alpha. Since either lambda or mu may overflow, ++* they should not, in general, be computed. + * + * Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) +-* If COMPQ='N', then Q will not be referenced. 
+-* If COMPQ='V' or 'I', then the transpose of the orthogonal +-* transformations which are applied to A and B on the left +-* will be applied to the array Q on the right. ++* On entry, if COMPZ = 'V', the orthogonal matrix Q1 used in ++* the reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the orthogonal matrix of left Schur ++* vectors of (H,T), and if COMPZ = 'V', the orthogonal matrix ++* of left Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. LDQ >= 1. + * If COMPQ='V' or 'I', then LDQ >= N. + * + * Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N) +-* If COMPZ='N', then Z will not be referenced. +-* If COMPZ='V' or 'I', then the orthogonal transformations +-* which are applied to A and B on the right will be applied +-* to the array Z on the right. ++* On entry, if COMPZ = 'V', the orthogonal matrix Z1 used in ++* the reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the orthogonal matrix of ++* right Schur vectors of (H,T), and if COMPZ = 'V', the ++* orthogonal matrix of right Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. LDZ >= 1. +@@ -187,13 +190,12 @@ + * INFO (output) INTEGER + * = 0: successful exit + * < 0: if INFO = -i, the i-th argument had an illegal value +-* = 1,...,N: the QZ iteration did not converge. (A,B) is not ++* = 1,...,N: the QZ iteration did not converge. (H,T) is not + * in Schur form, but ALPHAR(i), ALPHAI(i), and + * BETA(i), i=INFO+1,...,N should be correct. +-* = N+1,...,2*N: the shift calculation failed. (A,B) is not ++* = N+1,...,2*N: the shift calculation failed. (H,T) is not + * in Schur form, but ALPHAR(i), ALPHAI(i), and + * BETA(i), i=INFO-N+1,...,N should be correct. +-* > 2*N: various "impossible" errors. 
+ * + * Further Details + * =============== +@@ -225,7 +227,7 @@ + $ B1R, B22, B2A, B2I, B2R, BN, BNORM, BSCALE, + $ BTOL, C, C11I, C11R, C12, C21, C22I, C22R, CL, + $ CQ, CR, CZ, ESHIFT, S, S1, S1INV, S2, SAFMAX, +- $ SAFMIN, SCALE, SL, SQI, SQR, SR, SZI, SZR, T, ++ $ SAFMIN, SCALE, SL, SQI, SQR, SR, SZI, SZR, T1, + $ TAU, TEMP, TEMP2, TEMPI, TEMPR, U1, U12, U12L, + $ U2, ULP, VS, W11, W12, W21, W22, WABS, WI, WR, + $ WR2 +@@ -302,9 +304,9 @@ + INFO = -5 + ELSE IF( IHI.GT.N .OR. IHI.LT.ILO-1 ) THEN + INFO = -6 +- ELSE IF( LDA.LT.N ) THEN ++ ELSE IF( LDH.LT.N ) THEN + INFO = -8 +- ELSE IF( LDB.LT.N ) THEN ++ ELSE IF( LDT.LT.N ) THEN + INFO = -10 + ELSE IF( LDQ.LT.1 .OR. ( ILQ .AND. LDQ.LT.N ) ) THEN + INFO = -15 +@@ -340,8 +342,8 @@ + SAFMIN = DLAMCH( 'S' ) + SAFMAX = ONE / SAFMIN + ULP = DLAMCH( 'E' )*DLAMCH( 'B' ) +- ANORM = DLANHS( 'F', IN, A( ILO, ILO ), LDA, WORK ) +- BNORM = DLANHS( 'F', IN, B( ILO, ILO ), LDB, WORK ) ++ ANORM = DLANHS( 'F', IN, H( ILO, ILO ), LDH, WORK ) ++ BNORM = DLANHS( 'F', IN, T( ILO, ILO ), LDT, WORK ) + ATOL = MAX( SAFMIN, ULP*ANORM ) + BTOL = MAX( SAFMIN, ULP*BNORM ) + ASCALE = ONE / MAX( SAFMIN, ANORM ) +@@ -350,15 +352,15 @@ + * Set Eigenvalues IHI+1:N + * + DO 30 J = IHI + 1, N +- IF( B( J, J ).LT.ZERO ) THEN ++ IF( T( J, J ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 10 JR = 1, J +- A( JR, J ) = -A( JR, J ) +- B( JR, J ) = -B( JR, J ) ++ H( JR, J ) = -H( JR, J ) ++ T( JR, J ) = -T( JR, J ) + 10 CONTINUE + ELSE +- A( J, J ) = -A( J, J ) +- B( J, J ) = -B( J, J ) ++ H( J, J ) = -H( J, J ) ++ T( J, J ) = -T( J, J ) + END IF + IF( ILZ ) THEN + DO 20 JR = 1, N +@@ -366,9 +368,9 @@ + 20 CONTINUE + END IF + END IF +- ALPHAR( J ) = A( J, J ) ++ ALPHAR( J ) = H( J, J ) + ALPHAI( J ) = ZERO +- BETA( J ) = B( J, J ) ++ BETA( J ) = T( J, J ) + 30 CONTINUE + * + * If IHI < ILO, skip QZ steps +@@ -408,8 +410,8 @@ + * Split the matrix if possible. 
+ * + * Two tests: +-* 1: A(j,j-1)=0 or j=ILO +-* 2: B(j,j)=0 ++* 1: H(j,j-1)=0 or j=ILO ++* 2: T(j,j)=0 + * + IF( ILAST.EQ.ILO ) THEN + * +@@ -417,14 +419,14 @@ + * + GO TO 80 + ELSE +- IF( ABS( A( ILAST, ILAST-1 ) ).LE.ATOL ) THEN +- A( ILAST, ILAST-1 ) = ZERO ++ IF( ABS( H( ILAST, ILAST-1 ) ).LE.ATOL ) THEN ++ H( ILAST, ILAST-1 ) = ZERO + GO TO 80 + END IF + END IF + * +- IF( ABS( B( ILAST, ILAST ) ).LE.BTOL ) THEN +- B( ILAST, ILAST ) = ZERO ++ IF( ABS( T( ILAST, ILAST ) ).LE.BTOL ) THEN ++ T( ILAST, ILAST ) = ZERO + GO TO 70 + END IF + * +@@ -432,36 +434,36 @@ + * + DO 60 J = ILAST - 1, ILO, -1 + * +-* Test 1: for A(j,j-1)=0 or j=ILO ++* Test 1: for H(j,j-1)=0 or j=ILO + * + IF( J.EQ.ILO ) THEN + ILAZRO = .TRUE. + ELSE +- IF( ABS( A( J, J-1 ) ).LE.ATOL ) THEN +- A( J, J-1 ) = ZERO ++ IF( ABS( H( J, J-1 ) ).LE.ATOL ) THEN ++ H( J, J-1 ) = ZERO + ILAZRO = .TRUE. + ELSE + ILAZRO = .FALSE. + END IF + END IF + * +-* Test 2: for B(j,j)=0 ++* Test 2: for T(j,j)=0 + * +- IF( ABS( B( J, J ) ).LT.BTOL ) THEN +- B( J, J ) = ZERO ++ IF( ABS( T( J, J ) ).LT.BTOL ) THEN ++ T( J, J ) = ZERO + * + * Test 1a: Check for 2 consecutive small subdiagonals in A + * + ILAZR2 = .FALSE. + IF( .NOT.ILAZRO ) THEN +- TEMP = ABS( A( J, J-1 ) ) +- TEMP2 = ABS( A( J, J ) ) ++ TEMP = ABS( H( J, J-1 ) ) ++ TEMP2 = ABS( H( J, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( TEMP*( ASCALE*ABS( A( J+1, J ) ) ).LE.TEMP2* ++ IF( TEMP*( ASCALE*ABS( H( J+1, J ) ) ).LE.TEMP2* + $ ( ASCALE*ATOL ) )ILAZR2 = .TRUE. + END IF + * +@@ -473,21 +475,21 @@ + * + IF( ILAZRO .OR. 
ILAZR2 ) THEN + DO 40 JCH = J, ILAST - 1 +- TEMP = A( JCH, JCH ) +- CALL DLARTG( TEMP, A( JCH+1, JCH ), C, S, +- $ A( JCH, JCH ) ) +- A( JCH+1, JCH ) = ZERO +- CALL DROT( ILASTM-JCH, A( JCH, JCH+1 ), LDA, +- $ A( JCH+1, JCH+1 ), LDA, C, S ) +- CALL DROT( ILASTM-JCH, B( JCH, JCH+1 ), LDB, +- $ B( JCH+1, JCH+1 ), LDB, C, S ) ++ TEMP = H( JCH, JCH ) ++ CALL DLARTG( TEMP, H( JCH+1, JCH ), C, S, ++ $ H( JCH, JCH ) ) ++ H( JCH+1, JCH ) = ZERO ++ CALL DROT( ILASTM-JCH, H( JCH, JCH+1 ), LDH, ++ $ H( JCH+1, JCH+1 ), LDH, C, S ) ++ CALL DROT( ILASTM-JCH, T( JCH, JCH+1 ), LDT, ++ $ T( JCH+1, JCH+1 ), LDT, C, S ) + IF( ILQ ) + $ CALL DROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, S ) + IF( ILAZR2 ) +- $ A( JCH, JCH-1 ) = A( JCH, JCH-1 )*C ++ $ H( JCH, JCH-1 ) = H( JCH, JCH-1 )*C + ILAZR2 = .FALSE. +- IF( ABS( B( JCH+1, JCH+1 ) ).GE.BTOL ) THEN ++ IF( ABS( T( JCH+1, JCH+1 ) ).GE.BTOL ) THEN + IF( JCH+1.GE.ILAST ) THEN + GO TO 80 + ELSE +@@ -495,35 +497,35 @@ + GO TO 110 + END IF + END IF +- B( JCH+1, JCH+1 ) = ZERO ++ T( JCH+1, JCH+1 ) = ZERO + 40 CONTINUE + GO TO 70 + ELSE + * +-* Only test 2 passed -- chase the zero to B(ILAST,ILAST) +-* Then process as in the case B(ILAST,ILAST)=0 ++* Only test 2 passed -- chase the zero to T(ILAST,ILAST) ++* Then process as in the case T(ILAST,ILAST)=0 + * + DO 50 JCH = J, ILAST - 1 +- TEMP = B( JCH, JCH+1 ) +- CALL DLARTG( TEMP, B( JCH+1, JCH+1 ), C, S, +- $ B( JCH, JCH+1 ) ) +- B( JCH+1, JCH+1 ) = ZERO ++ TEMP = T( JCH, JCH+1 ) ++ CALL DLARTG( TEMP, T( JCH+1, JCH+1 ), C, S, ++ $ T( JCH, JCH+1 ) ) ++ T( JCH+1, JCH+1 ) = ZERO + IF( JCH.LT.ILASTM-1 ) +- $ CALL DROT( ILASTM-JCH-1, B( JCH, JCH+2 ), LDB, +- $ B( JCH+1, JCH+2 ), LDB, C, S ) +- CALL DROT( ILASTM-JCH+2, A( JCH, JCH-1 ), LDA, +- $ A( JCH+1, JCH-1 ), LDA, C, S ) ++ $ CALL DROT( ILASTM-JCH-1, T( JCH, JCH+2 ), LDT, ++ $ T( JCH+1, JCH+2 ), LDT, C, S ) ++ CALL DROT( ILASTM-JCH+2, H( JCH, JCH-1 ), LDH, ++ $ H( JCH+1, JCH-1 ), LDH, C, S ) + IF( ILQ ) + $ CALL DROT( N, Q( 1, JCH ), 1, 
Q( 1, JCH+1 ), 1, + $ C, S ) +- TEMP = A( JCH+1, JCH ) +- CALL DLARTG( TEMP, A( JCH+1, JCH-1 ), C, S, +- $ A( JCH+1, JCH ) ) +- A( JCH+1, JCH-1 ) = ZERO +- CALL DROT( JCH+1-IFRSTM, A( IFRSTM, JCH ), 1, +- $ A( IFRSTM, JCH-1 ), 1, C, S ) +- CALL DROT( JCH-IFRSTM, B( IFRSTM, JCH ), 1, +- $ B( IFRSTM, JCH-1 ), 1, C, S ) ++ TEMP = H( JCH+1, JCH ) ++ CALL DLARTG( TEMP, H( JCH+1, JCH-1 ), C, S, ++ $ H( JCH+1, JCH ) ) ++ H( JCH+1, JCH-1 ) = ZERO ++ CALL DROT( JCH+1-IFRSTM, H( IFRSTM, JCH ), 1, ++ $ H( IFRSTM, JCH-1 ), 1, C, S ) ++ CALL DROT( JCH-IFRSTM, T( IFRSTM, JCH ), 1, ++ $ T( IFRSTM, JCH-1 ), 1, C, S ) + IF( ILZ ) + $ CALL DROT( N, Z( 1, JCH ), 1, Z( 1, JCH-1 ), 1, + $ C, S ) +@@ -547,34 +549,34 @@ + INFO = N + 1 + GO TO 420 + * +-* B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a ++* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a + * 1x1 block. + * + 70 CONTINUE +- TEMP = A( ILAST, ILAST ) +- CALL DLARTG( TEMP, A( ILAST, ILAST-1 ), C, S, +- $ A( ILAST, ILAST ) ) +- A( ILAST, ILAST-1 ) = ZERO +- CALL DROT( ILAST-IFRSTM, A( IFRSTM, ILAST ), 1, +- $ A( IFRSTM, ILAST-1 ), 1, C, S ) +- CALL DROT( ILAST-IFRSTM, B( IFRSTM, ILAST ), 1, +- $ B( IFRSTM, ILAST-1 ), 1, C, S ) ++ TEMP = H( ILAST, ILAST ) ++ CALL DLARTG( TEMP, H( ILAST, ILAST-1 ), C, S, ++ $ H( ILAST, ILAST ) ) ++ H( ILAST, ILAST-1 ) = ZERO ++ CALL DROT( ILAST-IFRSTM, H( IFRSTM, ILAST ), 1, ++ $ H( IFRSTM, ILAST-1 ), 1, C, S ) ++ CALL DROT( ILAST-IFRSTM, T( IFRSTM, ILAST ), 1, ++ $ T( IFRSTM, ILAST-1 ), 1, C, S ) + IF( ILZ ) + $ CALL DROT( N, Z( 1, ILAST ), 1, Z( 1, ILAST-1 ), 1, C, S ) + * +-* A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHAR, ALPHAI, ++* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHAR, ALPHAI, + * and BETA + * + 80 CONTINUE +- IF( B( ILAST, ILAST ).LT.ZERO ) THEN ++ IF( T( ILAST, ILAST ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 90 J = IFRSTM, ILAST +- A( J, ILAST ) = -A( J, ILAST ) +- B( J, ILAST ) = -B( J, ILAST ) ++ H( J, ILAST ) = -H( J, ILAST ) ++ T( J, ILAST ) = -T( J, 
ILAST ) + 90 CONTINUE + ELSE +- A( ILAST, ILAST ) = -A( ILAST, ILAST ) +- B( ILAST, ILAST ) = -B( ILAST, ILAST ) ++ H( ILAST, ILAST ) = -H( ILAST, ILAST ) ++ T( ILAST, ILAST ) = -T( ILAST, ILAST ) + END IF + IF( ILZ ) THEN + DO 100 J = 1, N +@@ -582,9 +584,9 @@ + 100 CONTINUE + END IF + END IF +- ALPHAR( ILAST ) = A( ILAST, ILAST ) ++ ALPHAR( ILAST ) = H( ILAST, ILAST ) + ALPHAI( ILAST ) = ZERO +- BETA( ILAST ) = B( ILAST, ILAST ) ++ BETA( ILAST ) = T( ILAST, ILAST ) + * + * Go to next block -- exit if finished. + * +@@ -617,7 +619,7 @@ + * Compute single shifts. + * + * At this point, IFIRST < ILAST, and the diagonal elements of +-* B(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in ++* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in + * magnitude) + * + IF( ( IITER / 10 )*10.EQ.IITER ) THEN +@@ -625,10 +627,10 @@ + * Exceptional shift. Chosen for no particularly good reason. + * (Single shift only.) + * +- IF( ( DBLE( MAXIT )*SAFMIN )*ABS( A( ILAST-1, ILAST ) ).LT. +- $ ABS( B( ILAST-1, ILAST-1 ) ) ) THEN +- ESHIFT = ESHIFT + A( ILAST-1, ILAST ) / +- $ B( ILAST-1, ILAST-1 ) ++ IF( ( DBLE( MAXIT )*SAFMIN )*ABS( H( ILAST-1, ILAST ) ).LT. ++ $ ABS( T( ILAST-1, ILAST-1 ) ) ) THEN ++ ESHIFT = ESHIFT + H( ILAST-1, ILAST ) / ++ $ T( ILAST-1, ILAST-1 ) + ELSE + ESHIFT = ESHIFT + ONE / ( SAFMIN*DBLE( MAXIT ) ) + END IF +@@ -641,8 +643,8 @@ + * bottom-right 2x2 block of A and B. 
The first eigenvalue + * returned by DLAG2 is the Wilkinson shift (AEP p.512), + * +- CALL DLAG2( A( ILAST-1, ILAST-1 ), LDA, +- $ B( ILAST-1, ILAST-1 ), LDB, SAFMIN*SAFETY, S1, ++ CALL DLAG2( H( ILAST-1, ILAST-1 ), LDH, ++ $ T( ILAST-1, ILAST-1 ), LDT, SAFMIN*SAFETY, S1, + $ S2, WR, WR2, WI ) + * + TEMP = MAX( S1, SAFMIN*MAX( ONE, ABS( WR ), ABS( WI ) ) ) +@@ -669,14 +671,14 @@ + * + DO 120 J = ILAST - 1, IFIRST + 1, -1 + ISTART = J +- TEMP = ABS( S1*A( J, J-1 ) ) +- TEMP2 = ABS( S1*A( J, J )-WR*B( J, J ) ) ++ TEMP = ABS( S1*H( J, J-1 ) ) ++ TEMP2 = ABS( S1*H( J, J )-WR*T( J, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( ABS( ( ASCALE*A( J+1, J ) )*TEMP ).LE.( ASCALE*ATOL )* ++ IF( ABS( ( ASCALE*H( J+1, J ) )*TEMP ).LE.( ASCALE*ATOL )* + $ TEMP2 )GO TO 130 + 120 CONTINUE + * +@@ -687,26 +689,26 @@ + * + * Initial Q + * +- TEMP = S1*A( ISTART, ISTART ) - WR*B( ISTART, ISTART ) +- TEMP2 = S1*A( ISTART+1, ISTART ) ++ TEMP = S1*H( ISTART, ISTART ) - WR*T( ISTART, ISTART ) ++ TEMP2 = S1*H( ISTART+1, ISTART ) + CALL DLARTG( TEMP, TEMP2, C, S, TEMPR ) + * + * Sweep + * + DO 190 J = ISTART, ILAST - 1 + IF( J.GT.ISTART ) THEN +- TEMP = A( J, J-1 ) +- CALL DLARTG( TEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = ZERO ++ TEMP = H( J, J-1 ) ++ CALL DLARTG( TEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = ZERO + END IF + * + DO 140 JC = J, ILASTM +- TEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -S*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = TEMP +- TEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, JC ) = -S*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = TEMP2 ++ TEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -S*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = TEMP ++ TEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -S*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = TEMP2 + 140 CONTINUE + IF( ILQ ) THEN + DO 150 JR = 1, N +@@ -716,19 +718,19 @@ + 150 
CONTINUE + END IF + * +- TEMP = B( J+1, J+1 ) +- CALL DLARTG( TEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = ZERO ++ TEMP = T( J+1, J+1 ) ++ CALL DLARTG( TEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = ZERO + * + DO 160 JR = IFRSTM, MIN( J+2, ILAST ) +- TEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -S*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = TEMP ++ TEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -S*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = TEMP + 160 CONTINUE + DO 170 JR = IFRSTM, J +- TEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -S*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = TEMP ++ TEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -S*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = TEMP + 170 CONTINUE + IF( ILZ ) THEN + DO 180 JR = 1, N +@@ -759,8 +761,8 @@ + * B = ( ) with B11 non-negative. + * ( 0 B22 ) + * +- CALL DLASV2( B( ILAST-1, ILAST-1 ), B( ILAST-1, ILAST ), +- $ B( ILAST, ILAST ), B22, B11, SR, CR, SL, CL ) ++ CALL DLASV2( T( ILAST-1, ILAST-1 ), T( ILAST-1, ILAST ), ++ $ T( ILAST, ILAST ), B22, B11, SR, CR, SL, CL ) + * + IF( B11.LT.ZERO ) THEN + CR = -CR +@@ -769,17 +771,17 @@ + B22 = -B22 + END IF + * +- CALL DROT( ILASTM+1-IFIRST, A( ILAST-1, ILAST-1 ), LDA, +- $ A( ILAST, ILAST-1 ), LDA, CL, SL ) +- CALL DROT( ILAST+1-IFRSTM, A( IFRSTM, ILAST-1 ), 1, +- $ A( IFRSTM, ILAST ), 1, CR, SR ) ++ CALL DROT( ILASTM+1-IFIRST, H( ILAST-1, ILAST-1 ), LDH, ++ $ H( ILAST, ILAST-1 ), LDH, CL, SL ) ++ CALL DROT( ILAST+1-IFRSTM, H( IFRSTM, ILAST-1 ), 1, ++ $ H( IFRSTM, ILAST ), 1, CR, SR ) + * + IF( ILAST.LT.ILASTM ) +- $ CALL DROT( ILASTM-ILAST, B( ILAST-1, ILAST+1 ), LDB, +- $ B( ILAST, ILAST+1 ), LDA, CL, SL ) ++ $ CALL DROT( ILASTM-ILAST, T( ILAST-1, ILAST+1 ), LDT, ++ $ T( ILAST, ILAST+1 ), LDH, CL, SL ) + IF( IFRSTM.LT.ILAST-1 ) +- $ CALL DROT( IFIRST-IFRSTM, B( IFRSTM, ILAST-1 ), 1, +- $ B( IFRSTM, ILAST ), 1, CR, SR ) ++ $ CALL DROT( IFIRST-IFRSTM, T( IFRSTM, ILAST-1 ), 1, ++ $ T( IFRSTM, ILAST ), 1, CR, 
SR ) + * + IF( ILQ ) + $ CALL DROT( N, Q( 1, ILAST-1 ), 1, Q( 1, ILAST ), 1, CL, +@@ -788,17 +790,17 @@ + $ CALL DROT( N, Z( 1, ILAST-1 ), 1, Z( 1, ILAST ), 1, CR, + $ SR ) + * +- B( ILAST-1, ILAST-1 ) = B11 +- B( ILAST-1, ILAST ) = ZERO +- B( ILAST, ILAST-1 ) = ZERO +- B( ILAST, ILAST ) = B22 ++ T( ILAST-1, ILAST-1 ) = B11 ++ T( ILAST-1, ILAST ) = ZERO ++ T( ILAST, ILAST-1 ) = ZERO ++ T( ILAST, ILAST ) = B22 + * + * If B22 is negative, negate column ILAST + * + IF( B22.LT.ZERO ) THEN + DO 210 J = IFRSTM, ILAST +- A( J, ILAST ) = -A( J, ILAST ) +- B( J, ILAST ) = -B( J, ILAST ) ++ H( J, ILAST ) = -H( J, ILAST ) ++ T( J, ILAST ) = -T( J, ILAST ) + 210 CONTINUE + * + IF( ILZ ) THEN +@@ -812,8 +814,8 @@ + * + * Recompute shift + * +- CALL DLAG2( A( ILAST-1, ILAST-1 ), LDA, +- $ B( ILAST-1, ILAST-1 ), LDB, SAFMIN*SAFETY, S1, ++ CALL DLAG2( H( ILAST-1, ILAST-1 ), LDH, ++ $ T( ILAST-1, ILAST-1 ), LDT, SAFMIN*SAFETY, S1, + $ TEMP, WR, TEMP2, WI ) + * + * If standardization has perturbed the shift onto real line, +@@ -825,10 +827,10 @@ + * + * Do EISPACK (QZVAL) computation of alpha and beta + * +- A11 = A( ILAST-1, ILAST-1 ) +- A21 = A( ILAST, ILAST-1 ) +- A12 = A( ILAST-1, ILAST ) +- A22 = A( ILAST, ILAST ) ++ A11 = H( ILAST-1, ILAST-1 ) ++ A21 = H( ILAST, ILAST-1 ) ++ A12 = H( ILAST-1, ILAST ) ++ A22 = H( ILAST, ILAST ) + * + * Compute complex Givens rotation on right + * (Assume some element of C = (sA - wB) > unfl ) +@@ -845,10 +847,10 @@ + * + IF( ABS( C11R )+ABS( C11I )+ABS( C12 ).GT.ABS( C21 )+ + $ ABS( C22R )+ABS( C22I ) ) THEN +- T = DLAPY3( C12, C11R, C11I ) +- CZ = C12 / T +- SZR = -C11R / T +- SZI = -C11I / T ++ T1 = DLAPY3( C12, C11R, C11I ) ++ CZ = C12 / T1 ++ SZR = -C11R / T1 ++ SZI = -C11I / T1 + ELSE + CZ = DLAPY2( C22R, C22I ) + IF( CZ.LE.SAFMIN ) THEN +@@ -858,10 +860,10 @@ + ELSE + TEMPR = C22R / CZ + TEMPI = C22I / CZ +- T = DLAPY2( CZ, C21 ) +- CZ = CZ / T +- SZR = -C21*TEMPR / T +- SZI = C21*TEMPI / T ++ T1 = DLAPY2( CZ, C21 ) ++ CZ = CZ / T1 ++ SZR 
= -C21*TEMPR / T1 ++ SZI = C21*TEMPI / T1 + END IF + END IF + * +@@ -895,10 +897,10 @@ + SQI = TEMPI*A2R - TEMPR*A2I + END IF + END IF +- T = DLAPY3( CQ, SQR, SQI ) +- CQ = CQ / T +- SQR = SQR / T +- SQI = SQI / T ++ T1 = DLAPY3( CQ, SQR, SQI ) ++ CQ = CQ / T1 ++ SQR = SQR / T1 ++ SQI = SQI / T1 + * + * Compute diagonal elements of QBZ + * +@@ -950,26 +952,26 @@ + * + * We assume that the block is at least 3x3 + * +- AD11 = ( ASCALE*A( ILAST-1, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD21 = ( ASCALE*A( ILAST, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD12 = ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- AD22 = ( ASCALE*A( ILAST, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- U12 = B( ILAST-1, ILAST ) / B( ILAST, ILAST ) +- AD11L = ( ASCALE*A( IFIRST, IFIRST ) ) / +- $ ( BSCALE*B( IFIRST, IFIRST ) ) +- AD21L = ( ASCALE*A( IFIRST+1, IFIRST ) ) / +- $ ( BSCALE*B( IFIRST, IFIRST ) ) +- AD12L = ( ASCALE*A( IFIRST, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- AD22L = ( ASCALE*A( IFIRST+1, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- AD32L = ( ASCALE*A( IFIRST+2, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- U12L = B( IFIRST, IFIRST+1 ) / B( IFIRST+1, IFIRST+1 ) ++ AD11 = ( ASCALE*H( ILAST-1, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD21 = ( ASCALE*H( ILAST, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD12 = ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD22 = ( ASCALE*H( ILAST, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ U12 = T( ILAST-1, ILAST ) / T( ILAST, ILAST ) ++ AD11L = ( ASCALE*H( IFIRST, IFIRST ) ) / ++ $ ( BSCALE*T( IFIRST, IFIRST ) ) ++ AD21L = ( ASCALE*H( IFIRST+1, IFIRST ) ) / ++ $ ( BSCALE*T( IFIRST, IFIRST ) ) ++ AD12L = ( ASCALE*H( IFIRST, IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ AD22L = ( ASCALE*H( IFIRST+1, IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ AD32L = ( ASCALE*H( IFIRST+2, 
IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ U12L = T( IFIRST, IFIRST+1 ) / T( IFIRST+1, IFIRST+1 ) + * + V( 1 ) = ( AD11-AD11L )*( AD22-AD11L ) - AD12*AD21 + + $ AD21*U12*AD11L + ( AD12L-AD11L*U12L )*AD21L +@@ -991,27 +993,27 @@ + * Zero (j-1)st column of A + * + IF( J.GT.ISTART ) THEN +- V( 1 ) = A( J, J-1 ) +- V( 2 ) = A( J+1, J-1 ) +- V( 3 ) = A( J+2, J-1 ) ++ V( 1 ) = H( J, J-1 ) ++ V( 2 ) = H( J+1, J-1 ) ++ V( 3 ) = H( J+2, J-1 ) + * +- CALL DLARFG( 3, A( J, J-1 ), V( 2 ), 1, TAU ) ++ CALL DLARFG( 3, H( J, J-1 ), V( 2 ), 1, TAU ) + V( 1 ) = ONE +- A( J+1, J-1 ) = ZERO +- A( J+2, J-1 ) = ZERO ++ H( J+1, J-1 ) = ZERO ++ H( J+2, J-1 ) = ZERO + END IF + * + DO 230 JC = J, ILASTM +- TEMP = TAU*( A( J, JC )+V( 2 )*A( J+1, JC )+V( 3 )* +- $ A( J+2, JC ) ) +- A( J, JC ) = A( J, JC ) - TEMP +- A( J+1, JC ) = A( J+1, JC ) - TEMP*V( 2 ) +- A( J+2, JC ) = A( J+2, JC ) - TEMP*V( 3 ) +- TEMP2 = TAU*( B( J, JC )+V( 2 )*B( J+1, JC )+V( 3 )* +- $ B( J+2, JC ) ) +- B( J, JC ) = B( J, JC ) - TEMP2 +- B( J+1, JC ) = B( J+1, JC ) - TEMP2*V( 2 ) +- B( J+2, JC ) = B( J+2, JC ) - TEMP2*V( 3 ) ++ TEMP = TAU*( H( J, JC )+V( 2 )*H( J+1, JC )+V( 3 )* ++ $ H( J+2, JC ) ) ++ H( J, JC ) = H( J, JC ) - TEMP ++ H( J+1, JC ) = H( J+1, JC ) - TEMP*V( 2 ) ++ H( J+2, JC ) = H( J+2, JC ) - TEMP*V( 3 ) ++ TEMP2 = TAU*( T( J, JC )+V( 2 )*T( J+1, JC )+V( 3 )* ++ $ T( J+2, JC ) ) ++ T( J, JC ) = T( J, JC ) - TEMP2 ++ T( J+1, JC ) = T( J+1, JC ) - TEMP2*V( 2 ) ++ T( J+2, JC ) = T( J+2, JC ) - TEMP2*V( 3 ) + 230 CONTINUE + IF( ILQ ) THEN + DO 240 JR = 1, N +@@ -1028,27 +1030,27 @@ + * Swap rows to pivot + * + ILPIVT = .FALSE. 
+- TEMP = MAX( ABS( B( J+1, J+1 ) ), ABS( B( J+1, J+2 ) ) ) +- TEMP2 = MAX( ABS( B( J+2, J+1 ) ), ABS( B( J+2, J+2 ) ) ) ++ TEMP = MAX( ABS( T( J+1, J+1 ) ), ABS( T( J+1, J+2 ) ) ) ++ TEMP2 = MAX( ABS( T( J+2, J+1 ) ), ABS( T( J+2, J+2 ) ) ) + IF( MAX( TEMP, TEMP2 ).LT.SAFMIN ) THEN + SCALE = ZERO + U1 = ONE + U2 = ZERO + GO TO 250 + ELSE IF( TEMP.GE.TEMP2 ) THEN +- W11 = B( J+1, J+1 ) +- W21 = B( J+2, J+1 ) +- W12 = B( J+1, J+2 ) +- W22 = B( J+2, J+2 ) +- U1 = B( J+1, J ) +- U2 = B( J+2, J ) ++ W11 = T( J+1, J+1 ) ++ W21 = T( J+2, J+1 ) ++ W12 = T( J+1, J+2 ) ++ W22 = T( J+2, J+2 ) ++ U1 = T( J+1, J ) ++ U2 = T( J+2, J ) + ELSE +- W21 = B( J+1, J+1 ) +- W11 = B( J+2, J+1 ) +- W22 = B( J+1, J+2 ) +- W12 = B( J+2, J+2 ) +- U2 = B( J+1, J ) +- U1 = B( J+2, J ) ++ W21 = T( J+1, J+1 ) ++ W11 = T( J+2, J+1 ) ++ W22 = T( J+1, J+2 ) ++ W12 = T( J+2, J+2 ) ++ U2 = T( J+1, J ) ++ U1 = T( J+2, J ) + END IF + * + * Swap columns if nec. +@@ -1098,9 +1100,9 @@ + * + * Compute Householder Vector + * +- T = SQRT( SCALE**2+U1**2+U2**2 ) +- TAU = ONE + SCALE / T +- VS = -ONE / ( SCALE+T ) ++ T1 = SQRT( SCALE**2+U1**2+U2**2 ) ++ TAU = ONE + SCALE / T1 ++ VS = -ONE / ( SCALE+T1 ) + V( 1 ) = ONE + V( 2 ) = VS*U1 + V( 3 ) = VS*U2 +@@ -1108,18 +1110,18 @@ + * Apply transformations from the right. 
+ * + DO 260 JR = IFRSTM, MIN( J+3, ILAST ) +- TEMP = TAU*( A( JR, J )+V( 2 )*A( JR, J+1 )+V( 3 )* +- $ A( JR, J+2 ) ) +- A( JR, J ) = A( JR, J ) - TEMP +- A( JR, J+1 ) = A( JR, J+1 ) - TEMP*V( 2 ) +- A( JR, J+2 ) = A( JR, J+2 ) - TEMP*V( 3 ) ++ TEMP = TAU*( H( JR, J )+V( 2 )*H( JR, J+1 )+V( 3 )* ++ $ H( JR, J+2 ) ) ++ H( JR, J ) = H( JR, J ) - TEMP ++ H( JR, J+1 ) = H( JR, J+1 ) - TEMP*V( 2 ) ++ H( JR, J+2 ) = H( JR, J+2 ) - TEMP*V( 3 ) + 260 CONTINUE + DO 270 JR = IFRSTM, J + 2 +- TEMP = TAU*( B( JR, J )+V( 2 )*B( JR, J+1 )+V( 3 )* +- $ B( JR, J+2 ) ) +- B( JR, J ) = B( JR, J ) - TEMP +- B( JR, J+1 ) = B( JR, J+1 ) - TEMP*V( 2 ) +- B( JR, J+2 ) = B( JR, J+2 ) - TEMP*V( 3 ) ++ TEMP = TAU*( T( JR, J )+V( 2 )*T( JR, J+1 )+V( 3 )* ++ $ T( JR, J+2 ) ) ++ T( JR, J ) = T( JR, J ) - TEMP ++ T( JR, J+1 ) = T( JR, J+1 ) - TEMP*V( 2 ) ++ T( JR, J+2 ) = T( JR, J+2 ) - TEMP*V( 3 ) + 270 CONTINUE + IF( ILZ ) THEN + DO 280 JR = 1, N +@@ -1130,8 +1132,8 @@ + Z( JR, J+2 ) = Z( JR, J+2 ) - TEMP*V( 3 ) + 280 CONTINUE + END IF +- B( J+1, J ) = ZERO +- B( J+2, J ) = ZERO ++ T( J+1, J ) = ZERO ++ T( J+2, J ) = ZERO + 290 CONTINUE + * + * Last elements: Use Givens rotations +@@ -1139,17 +1141,17 @@ + * Rotations from the left + * + J = ILAST - 1 +- TEMP = A( J, J-1 ) +- CALL DLARTG( TEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = ZERO ++ TEMP = H( J, J-1 ) ++ CALL DLARTG( TEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = ZERO + * + DO 300 JC = J, ILASTM +- TEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -S*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = TEMP +- TEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, JC ) = -S*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = TEMP2 ++ TEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -S*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = TEMP ++ TEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -S*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = TEMP2 + 300 CONTINUE + IF( ILQ ) THEN + DO 310 JR = 1, N +@@ -1161,19 +1163,19 @@ 
+ * + * Rotations from the right. + * +- TEMP = B( J+1, J+1 ) +- CALL DLARTG( TEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = ZERO ++ TEMP = T( J+1, J+1 ) ++ CALL DLARTG( TEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = ZERO + * + DO 320 JR = IFRSTM, ILAST +- TEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -S*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = TEMP ++ TEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -S*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = TEMP + 320 CONTINUE + DO 330 JR = IFRSTM, ILAST - 1 +- TEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -S*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = TEMP ++ TEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -S*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = TEMP + 330 CONTINUE + IF( ILZ ) THEN + DO 340 JR = 1, N +@@ -1207,15 +1209,15 @@ + * Set Eigenvalues 1:ILO-1 + * + DO 410 J = 1, ILO - 1 +- IF( B( J, J ).LT.ZERO ) THEN ++ IF( T( J, J ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 390 JR = 1, J +- A( JR, J ) = -A( JR, J ) +- B( JR, J ) = -B( JR, J ) ++ H( JR, J ) = -H( JR, J ) ++ T( JR, J ) = -T( JR, J ) + 390 CONTINUE + ELSE +- A( J, J ) = -A( J, J ) +- B( J, J ) = -B( J, J ) ++ H( J, J ) = -H( J, J ) ++ T( J, J ) = -T( J, J ) + END IF + IF( ILZ ) THEN + DO 400 JR = 1, N +@@ -1223,9 +1225,9 @@ + 400 CONTINUE + END IF + END IF +- ALPHAR( J ) = A( J, J ) ++ ALPHAR( J ) = H( J, J ) + ALPHAI( J ) = ZERO +- BETA( J ) = B( J, J ) ++ BETA( J ) = T( J, J ) + 410 CONTINUE + * + * Normal Termination +diff -uNr LAPACK.orig/SRC/dlasr.f LAPACK/SRC/dlasr.f +--- LAPACK.orig/SRC/dlasr.f Thu Nov 4 14:24:50 1999 ++++ LAPACK/SRC/dlasr.f Fri May 25 16:12:31 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK auxiliary routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1992 ++* May 3, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER DIRECT, PIVOT, SIDE +@@ -16,44 +16,77 @@ + * Purpose + * ======= + * +-* DLASR performs the transformation +-* +-* A := P*A, when SIDE = 'L' or 'l' ( Left-hand side ) +-* +-* A := A*P', when SIDE = 'R' or 'r' ( Right-hand side ) +-* +-* where A is an m by n real matrix and P is an orthogonal matrix, +-* consisting of a sequence of plane rotations determined by the +-* parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l' +-* and z = n when SIDE = 'R' or 'r' ): +-* +-* When DIRECT = 'F' or 'f' ( Forward sequence ) then +-* +-* P = P( z - 1 )*...*P( 2 )*P( 1 ), +-* +-* and when DIRECT = 'B' or 'b' ( Backward sequence ) then +-* +-* P = P( 1 )*P( 2 )*...*P( z - 1 ), +-* +-* where P( k ) is a plane rotation matrix for the following planes: +-* +-* when PIVOT = 'V' or 'v' ( Variable pivot ), +-* the plane ( k, k + 1 ) +-* +-* when PIVOT = 'T' or 't' ( Top pivot ), +-* the plane ( 1, k + 1 ) +-* +-* when PIVOT = 'B' or 'b' ( Bottom pivot ), +-* the plane ( k, z ) +-* +-* c( k ) and s( k ) must contain the cosine and sine that define the +-* matrix P( k ). The two by two plane rotation part of the matrix +-* P( k ), R( k ), is assumed to be of the form +-* +-* R( k ) = ( c( k ) s( k ) ). +-* ( -s( k ) c( k ) ) +-* +-* This version vectorises across rows of the array A when SIDE = 'L'. ++* DLASR applies a sequence of plane rotations to a real matrix A, ++* from either the left or the right. ++* ++* When SIDE = 'L', the transformation takes the form ++* ++* A := P*A ++* ++* and when SIDE = 'R', the transformation takes the form ++* ++* A := A*P**T ++* ++* where P is an orthogonal matrix consisting of a sequence of z plane ++* rotations, with z = M when SIDE = 'L' and z = N when SIDE = 'R', ++* and P**T is the transpose of P. ++* ++* When DIRECT = 'F' (Forward sequence), then ++* ++* P = P(z-1) * ... * P(2) * P(1) ++* ++* and when DIRECT = 'B' (Backward sequence), then ++* ++* P = P(1) * P(2) * ... 
* P(z-1) ++* ++* where P(k) is a plane rotation matrix defined by the 2-by-2 rotation ++* ++* R(k) = ( c(k) s(k) ) ++* = ( -s(k) c(k) ). ++* ++* When PIVOT = 'V' (Variable pivot), the rotation is performed ++* for the plane (k,k+1), i.e., P(k) has the form ++* ++* P(k) = ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( c(k) s(k) ) ++* ( -s(k) c(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ++* where R(k) appears as a rank-2 modification to the identity matrix in ++* rows and columns k and k+1. ++* ++* When PIVOT = 'T' (Top pivot), the rotation is performed for the ++* plane (1,k+1), so P(k) has the form ++* ++* P(k) = ( c(k) s(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( -s(k) c(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ++* where R(k) appears in rows and columns 1 and k+1. ++* ++* Similarly, when PIVOT = 'B' (Bottom pivot), the rotation is ++* performed for the plane (k,z), giving P(k) the form ++* ++* P(k) = ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( c(k) s(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( -s(k) c(k) ) ++* ++* where R(k) appears in rows and columns k and z. The rotations are ++* performed without ever forming P(k) explicitly. + * + * Arguments + * ========= +@@ -62,13 +95,13 @@ + * Specifies whether the plane rotation matrix P is applied to + * A on the left or the right. + * = 'L': Left, compute A := P*A +-* = 'R': Right, compute A:= A*P' ++* = 'R': Right, compute A:= A*P**T + * + * DIRECT (input) CHARACTER*1 + * Specifies whether P is a forward or backward sequence of + * plane rotations. +-* = 'F': Forward, P = P( z - 1 )*...*P( 2 )*P( 1 ) +-* = 'B': Backward, P = P( 1 )*P( 2 )*...*P( z - 1 ) ++* = 'F': Forward, P = P(z-1)*...*P(2)*P(1) ++* = 'B': Backward, P = P(1)*P(2)*...*P(z-1) + * + * PIVOT (input) CHARACTER*1 + * Specifies the plane for which P(k) is a plane rotation +@@ -85,18 +118,22 @@ + * The number of columns of the matrix A. If n <= 1, an + * immediate return is effected. 
+ * +-* C, S (input) DOUBLE PRECISION arrays, dimension ++* C (input) DOUBLE PRECISION array, dimension ++* (M-1) if SIDE = 'L' ++* (N-1) if SIDE = 'R' ++* The cosines c(k) of the plane rotations. ++* ++* S (input) DOUBLE PRECISION array, dimension + * (M-1) if SIDE = 'L' + * (N-1) if SIDE = 'R' +-* c(k) and s(k) contain the cosine and sine that define the +-* matrix P(k). The two by two plane rotation part of the +-* matrix P(k), R(k), is assumed to be of the form +-* R( k ) = ( c( k ) s( k ) ). +-* ( -s( k ) c( k ) ) ++* The sines s(k) of the plane rotations. The 2-by-2 plane ++* rotation part of the matrix P(k), R(k), has the form ++* R(k) = ( c(k) s(k) ) ++* ( -s(k) c(k) ). + * + * A (input/output) DOUBLE PRECISION array, dimension (LDA,N) +-* The m by n matrix A. On exit, A is overwritten by P*A if +-* SIDE = 'R' or by A*P' if SIDE = 'L'. ++* The M-by-N matrix A. On exit, A is overwritten by P*A if ++* SIDE = 'R' or by A*P**T if SIDE = 'L'. + * + * LDA (input) INTEGER + * The leading dimension of the array A. LDA >= max(1,M). +diff -uNr LAPACK.orig/SRC/dsbgst.f LAPACK/SRC/dsbgst.f +--- LAPACK.orig/SRC/dsbgst.f Thu Nov 4 14:23:31 1999 ++++ LAPACK/SRC/dsbgst.f Fri May 25 16:12:50 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* January 9, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO, VECT +@@ -125,7 +125,7 @@ + INFO = -3 + ELSE IF( KA.LT.0 ) THEN + INFO = -4 +- ELSE IF( KB.LT.0 ) THEN ++ ELSE IF( KB.LT.0 .OR. KB.GT.KA ) THEN + INFO = -5 + ELSE IF( LDAB.LT.KA+1 ) THEN + INFO = -7 +diff -uNr LAPACK.orig/SRC/dstebz.f LAPACK/SRC/dstebz.f +--- LAPACK.orig/SRC/dstebz.f Thu Nov 4 14:24:57 1999 ++++ LAPACK/SRC/dstebz.f Fri May 25 16:13:23 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-18-00: Increase FUDGE factor for T3E (eca) + * + * .. Scalar Arguments .. + CHARACTER ORDER, RANGE +@@ -175,7 +176,7 @@ + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0, TWO = 2.0D0, + $ HALF = 1.0D0 / TWO ) + DOUBLE PRECISION FUDGE, RELFAC +- PARAMETER ( FUDGE = 2.0D0, RELFAC = 2.0D0 ) ++ PARAMETER ( FUDGE = 2.1D0, RELFAC = 2.0D0 ) + * .. + * .. Local Scalars .. + LOGICAL NCNVRG, TOOFEW +diff -uNr LAPACK.orig/SRC/dtgevc.f LAPACK/SRC/dtgevc.f +--- LAPACK.orig/SRC/dtgevc.f Thu Nov 4 14:26:09 1999 ++++ LAPACK/SRC/dtgevc.f Fri May 25 16:13:33 2001 +@@ -1,18 +1,18 @@ +- SUBROUTINE DTGEVC( SIDE, HOWMNY, SELECT, N, A, LDA, B, LDB, VL, ++ SUBROUTINE DTGEVC( SIDE, HOWMNY, SELECT, N, S, LDS, P, LDP, VL, + $ LDVL, VR, LDVR, MM, M, WORK, INFO ) + * + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 4, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +- INTEGER INFO, LDA, LDB, LDVL, LDVR, M, MM, N ++ INTEGER INFO, LDP, LDS, LDVL, LDVR, M, MM, N + * .. + * .. Array Arguments .. + LOGICAL SELECT( * ) +- DOUBLE PRECISION A( LDA, * ), B( LDB, * ), VL( LDVL, * ), ++ DOUBLE PRECISION P( LDP, * ), S( LDS, * ), VL( LDVL, * ), + $ VR( LDVR, * ), WORK( * ) + * .. + * +@@ -20,35 +20,31 @@ + * Purpose + * ======= + * +-* DTGEVC computes some or all of the right and/or left generalized +-* eigenvectors of a pair of real upper triangular matrices (A,B). 
+-* +-* The right generalized eigenvector x and the left generalized +-* eigenvector y of (A,B) corresponding to a generalized eigenvalue +-* w are defined by: +-* +-* (A - wB) * x = 0 and y**H * (A - wB) = 0 +-* ++* DTGEVC computes some or all of the right and/or left eigenvectors of ++* a pair of real matrices (S,P), where S is a quasi-triangular matrix ++* and P is upper triangular. Matrix pairs of this type are produced by ++* the generalized Schur factorization of a matrix pair (A,B): ++* ++* A = Q*S*Z**T, B = Q*P*Z**T ++* ++* as computed by DGGHRD + DHGEQZ. ++* ++* The right eigenvector x and the left eigenvector y of (S,P) ++* corresponding to an eigenvalue w are defined by: ++* ++* S*x = w*P*x, (y**H)*S = w*(y**H)*P, ++* + * where y**H denotes the conjugate tranpose of y. +-* +-* If an eigenvalue w is determined by zero diagonal elements of both A +-* and B, a unit vector is returned as the corresponding eigenvector. +-* +-* If all eigenvectors are requested, the routine may either return +-* the matrices X and/or Y of right or left eigenvectors of (A,B), or +-* the products Z*X and/or Q*Y, where Z and Q are input orthogonal +-* matrices. If (A,B) was obtained from the generalized real-Schur +-* factorization of an original pair of matrices +-* (A0,B0) = (Q*A*Z**H,Q*B*Z**H), +-* then Z*X and Q*Y are the matrices of right or left eigenvectors of +-* A. +-* +-* A must be block upper triangular, with 1-by-1 and 2-by-2 diagonal +-* blocks. Corresponding to each 2-by-2 diagonal block is a complex +-* conjugate pair of eigenvalues and eigenvectors; only one +-* eigenvector of the pair is computed, namely the one corresponding +-* to the eigenvalue with positive imaginary part. +-* ++* The eigenvalues are not input to this routine, but are computed ++* directly from the diagonal blocks of S and P. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of (S,P), or the products Z*X and/or Q*Y, ++* where Z and Q are input matrices. 
++* If Q and Z are the orthogonal factors from the generalized Schur ++* factorization of a matrix pair (A,B), then Z*X and Q*Y ++* are the matrices of right and left eigenvectors of (A,B). ++* + * Arguments + * ========= + * +@@ -59,78 +55,84 @@ + * + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; +-* = 'B': compute all right and/or left eigenvectors, and +-* backtransform them using the input matrices supplied +-* in VR and/or VL; ++* = 'B': compute all right and/or left eigenvectors, ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, + * specified by the logical array SELECT. + * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY='S', SELECT specifies the eigenvectors to be +-* computed. +-* If HOWMNY='A' or 'B', SELECT is not referenced. +-* To select the real eigenvector corresponding to the real +-* eigenvalue w(j), SELECT(j) must be set to .TRUE. To select +-* the complex eigenvector corresponding to a complex conjugate +-* pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must +-* be set to .TRUE.. ++* computed. If w(j) is a real eigenvalue, the corresponding ++* real eigenvector is computed if SELECT(j) is .TRUE.. ++* If w(j) and w(j+1) are the real and imaginary parts of a ++* complex eigenvalue, the corresponding complex eigenvector ++* is computed if either SELECT(j) or SELECT(j+1) is .TRUE., ++* and on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is ++* set to .FALSE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER +-* The order of the matrices A and B. N >= 0. ++* The order of the matrices S and P. N >= 0. + * +-* A (input) DOUBLE PRECISION array, dimension (LDA,N) +-* The upper quasi-triangular matrix A. ++* S (input) DOUBLE PRECISION array, dimension (LDS,N) ++* The upper quasi-triangular matrix S from a generalized Schur ++* factorization, as computed by DHGEQZ. 
++* ++* LDS (input) INTEGER ++* The leading dimension of array S. LDS >= max(1,N). ++* ++* P (input) DOUBLE PRECISION array, dimension (LDP,N) ++* The upper triangular matrix P from a generalized Schur ++* factorization, as computed by DHGEQZ. ++* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks ++* of S must be in positive diagonal form. + * +-* LDA (input) INTEGER +-* The leading dimension of array A. LDA >= max(1, N). +-* +-* B (input) DOUBLE PRECISION array, dimension (LDB,N) +-* The upper triangular matrix B. If A has a 2-by-2 diagonal +-* block, then the corresponding 2-by-2 block of B must be +-* diagonal with positive elements. +-* +-* LDB (input) INTEGER +-* The leading dimension of array B. LDB >= max(1,N). ++* LDP (input) INTEGER ++* The leading dimension of array P. LDP >= max(1,N). + * + * VL (input/output) DOUBLE PRECISION array, dimension (LDVL,MM) + * On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must + * contain an N-by-N matrix Q (usually the orthogonal matrix Q + * of left Schur vectors returned by DHGEQZ). + * On exit, if SIDE = 'L' or 'B', VL contains: +-* if HOWMNY = 'A', the matrix Y of left eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Q*Y; +-* if HOWMNY = 'S', the left eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the left eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VL, in the same order as their eigenvalues. +-* If SIDE = 'R', VL is not referenced. + * + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part, and the second the imaginary part. + * ++* Not referenced if SIDE = 'R'. ++* + * LDVL (input) INTEGER +-* The leading dimension of array VL. +-* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. 
+ * + * VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +-* contain an N-by-N matrix Q (usually the orthogonal matrix Z ++* contain an N-by-N matrix Z (usually the orthogonal matrix Z + * of right Schur vectors returned by DHGEQZ). ++* + * On exit, if SIDE = 'R' or 'B', VR contains: +-* if HOWMNY = 'A', the matrix X of right eigenvectors of (A,B); +-* if HOWMNY = 'B', the matrix Z*X; +-* if HOWMNY = 'S', the right eigenvectors of (A,B) specified by +-* SELECT, stored consecutively in the columns of +-* VR, in the same order as their eigenvalues. +-* If SIDE = 'L', VR is not referenced. ++* if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); ++* if HOWMNY = 'B' or 'b', the matrix Z*X; ++* if HOWMNY = 'S' or 's', the right eigenvectors of (S,P) ++* specified by SELECT, stored consecutively in the ++* columns of VR, in the same order as their ++* eigenvalues. + * + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part and the second the imaginary part. ++* ++* Not referenced if SIDE = 'L'. + * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. +-* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +@@ -199,7 +201,7 @@ + * partial sums. Since FORTRAN arrays are stored columnwise, this has + * the advantage that at each step, the elements of C that are accessed + * are adjacent to one another, whereas with the rowwise method, the +-* elements accessed at a step are spaced LDA (and LDB) words apart. ++* elements accessed at a step are spaced LDS (and LDP) words apart. 
+ * + * When finding left eigenvectors, the matrix in question is the + * transpose of the one in storage, so the rowwise method then +@@ -226,8 +228,8 @@ + $ XSCALE + * .. + * .. Local Arrays .. +- DOUBLE PRECISION BDIAG( 2 ), SUM( 2, 2 ), SUMA( 2, 2 ), +- $ SUMB( 2, 2 ) ++ DOUBLE PRECISION BDIAG( 2 ), SUM( 2, 2 ), SUMS( 2, 2 ), ++ $ SUMP( 2, 2 ) + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -235,7 +237,7 @@ + EXTERNAL LSAME, DLAMCH + * .. + * .. External Subroutines .. +- EXTERNAL DGEMV, DLACPY, DLAG2, DLALN2, XERBLA ++ EXTERNAL DGEMV, DLABAD, DLACPY, DLAG2, DLALN2, XERBLA + * .. + * .. Intrinsic Functions .. + INTRINSIC ABS, MAX, MIN +@@ -252,7 +254,7 @@ + IHWMNY = 2 + ILALL = .FALSE. + ILBACK = .FALSE. +- ELSE IF( LSAME( HOWMNY, 'B' ) .OR. LSAME( HOWMNY, 'T' ) ) THEN ++ ELSE IF( LSAME( HOWMNY, 'B' ) ) THEN + IHWMNY = 3 + ILALL = .TRUE. + ILBACK = .TRUE. +@@ -284,9 +286,9 @@ + INFO = -2 + ELSE IF( N.LT.0 ) THEN + INFO = -4 +- ELSE IF( LDA.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDS.LT.MAX( 1, N ) ) THEN + INFO = -6 +- ELSE IF( LDB.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDP.LT.MAX( 1, N ) ) THEN + INFO = -8 + END IF + IF( INFO.NE.0 ) THEN +@@ -305,7 +307,7 @@ + GO TO 10 + END IF + IF( J.LT.N ) THEN +- IF( A( J+1, J ).NE.ZERO ) ++ IF( S( J+1, J ).NE.ZERO ) + $ ILCPLX = .TRUE. + END IF + IF( ILCPLX ) THEN +@@ -325,11 +327,11 @@ + ILABAD = .FALSE. + ILBBAD = .FALSE. + DO 20 J = 1, N - 1 +- IF( A( J+1, J ).NE.ZERO ) THEN +- IF( B( J, J ).EQ.ZERO .OR. B( J+1, J+1 ).EQ.ZERO .OR. +- $ B( J, J+1 ).NE.ZERO )ILBBAD = .TRUE. ++ IF( S( J+1, J ).NE.ZERO ) THEN ++ IF( P( J, J ).EQ.ZERO .OR. P( J+1, J+1 ).EQ.ZERO .OR. ++ $ P( J, J+1 ).NE.ZERO )ILBBAD = .TRUE. + IF( J.LT.N-1 ) THEN +- IF( A( J+2, J+1 ).NE.ZERO ) ++ IF( S( J+2, J+1 ).NE.ZERO ) + $ ILABAD = .TRUE. + END IF + END IF +@@ -372,30 +374,30 @@ + * blocks) of A and B to check for possible overflow in the + * triangular solver. 
+ * +- ANORM = ABS( A( 1, 1 ) ) ++ ANORM = ABS( S( 1, 1 ) ) + IF( N.GT.1 ) +- $ ANORM = ANORM + ABS( A( 2, 1 ) ) +- BNORM = ABS( B( 1, 1 ) ) ++ $ ANORM = ANORM + ABS( S( 2, 1 ) ) ++ BNORM = ABS( P( 1, 1 ) ) + WORK( 1 ) = ZERO + WORK( N+1 ) = ZERO + * + DO 50 J = 2, N + TEMP = ZERO + TEMP2 = ZERO +- IF( A( J, J-1 ).EQ.ZERO ) THEN ++ IF( S( J, J-1 ).EQ.ZERO ) THEN + IEND = J - 1 + ELSE + IEND = J - 2 + END IF + DO 30 I = 1, IEND +- TEMP = TEMP + ABS( A( I, J ) ) +- TEMP2 = TEMP2 + ABS( B( I, J ) ) ++ TEMP = TEMP + ABS( S( I, J ) ) ++ TEMP2 = TEMP2 + ABS( P( I, J ) ) + 30 CONTINUE + WORK( J ) = TEMP + WORK( N+J ) = TEMP2 + DO 40 I = IEND + 1, MIN( J+1, N ) +- TEMP = TEMP + ABS( A( I, J ) ) +- TEMP2 = TEMP2 + ABS( B( I, J ) ) ++ TEMP = TEMP + ABS( S( I, J ) ) ++ TEMP2 = TEMP2 + ABS( P( I, J ) ) + 40 CONTINUE + ANORM = MAX( ANORM, TEMP ) + BNORM = MAX( BNORM, TEMP2 ) +@@ -425,7 +427,7 @@ + END IF + NW = 1 + IF( JE.LT.N ) THEN +- IF( A( JE+1, JE ).NE.ZERO ) THEN ++ IF( S( JE+1, JE ).NE.ZERO ) THEN + ILCPLX = .TRUE. + NW = 2 + END IF +@@ -444,8 +446,8 @@ + * (c) complex eigenvalue. + * + IF( .NOT.ILCPLX ) THEN +- IF( ABS( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( B( JE, JE ) ).LE.SAFMIN ) THEN ++ IF( ABS( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( P( JE, JE ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- return unit eigenvector + * +@@ -472,10 +474,10 @@ + * + * Real eigenvalue + * +- TEMP = ONE / MAX( ABS( A( JE, JE ) )*ASCALE, +- $ ABS( B( JE, JE ) )*BSCALE, SAFMIN ) +- SALFAR = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*B( JE, JE ) )*BSCALE ++ TEMP = ONE / MAX( ABS( S( JE, JE ) )*ASCALE, ++ $ ABS( P( JE, JE ) )*BSCALE, SAFMIN ) ++ SALFAR = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*P( JE, JE ) )*BSCALE + ACOEF = SBETA*ASCALE + BCOEFR = SALFAR*BSCALE + BCOEFI = ZERO +@@ -517,7 +519,7 @@ + * + * Complex eigenvalue + * +- CALL DLAG2( A( JE, JE ), LDA, B( JE, JE ), LDB, ++ CALL DLAG2( S( JE, JE ), LDS, P( JE, JE ), LDP, + $ SAFMIN*SAFETY, ACOEF, TEMP, BCOEFR, TEMP2, + $ BCOEFI ) + BCOEFI = -BCOEFI +@@ -549,9 +551,9 @@ + * + * Compute first two components of eigenvector + * +- TEMP = ACOEF*A( JE+1, JE ) +- TEMP2R = ACOEF*A( JE, JE ) - BCOEFR*B( JE, JE ) +- TEMP2I = -BCOEFI*B( JE, JE ) ++ TEMP = ACOEF*S( JE+1, JE ) ++ TEMP2R = ACOEF*S( JE, JE ) - BCOEFR*P( JE, JE ) ++ TEMP2I = -BCOEFI*P( JE, JE ) + IF( ABS( TEMP ).GT.ABS( TEMP2R )+ABS( TEMP2I ) ) THEN + WORK( 2*N+JE ) = ONE + WORK( 3*N+JE ) = ZERO +@@ -560,10 +562,10 @@ + ELSE + WORK( 2*N+JE+1 ) = ONE + WORK( 3*N+JE+1 ) = ZERO +- TEMP = ACOEF*A( JE, JE+1 ) +- WORK( 2*N+JE ) = ( BCOEFR*B( JE+1, JE+1 )-ACOEF* +- $ A( JE+1, JE+1 ) ) / TEMP +- WORK( 3*N+JE ) = BCOEFI*B( JE+1, JE+1 ) / TEMP ++ TEMP = ACOEF*S( JE, JE+1 ) ++ WORK( 2*N+JE ) = ( BCOEFR*P( JE+1, JE+1 )-ACOEF* ++ $ S( JE+1, JE+1 ) ) / TEMP ++ WORK( 3*N+JE ) = BCOEFI*P( JE+1, JE+1 ) / TEMP + END IF + XMAX = MAX( ABS( WORK( 2*N+JE ) )+ABS( WORK( 3*N+JE ) ), + $ ABS( WORK( 2*N+JE+1 ) )+ABS( WORK( 3*N+JE+1 ) ) ) +@@ -586,11 +588,11 @@ + END IF + * + NA = 1 +- BDIAG( 1 ) = B( J, J ) ++ BDIAG( 1 ) = P( J, J ) + IF( J.LT.N ) THEN +- IF( A( J+1, J ).NE.ZERO ) THEN ++ IF( S( J+1, J ).NE.ZERO ) THEN + IL2BY2 = .TRUE. 
+- BDIAG( 2 ) = B( J+1, J+1 ) ++ BDIAG( 2 ) = P( J+1, J+1 ) + NA = 2 + END IF + END IF +@@ -616,13 +618,13 @@ + * Compute dot products + * + * j-1 +-* SUM = sum conjg( a*A(k,j) - b*B(k,j) )*x(k) ++* SUM = sum conjg( a*S(k,j) - b*P(k,j) )*x(k) + * k=je + * + * To reduce the op count, this is done as + * + * _ j-1 _ j-1 +-* a*conjg( sum A(k,j)*x(k) ) - b*conjg( sum B(k,j)*x(k) ) ++* a*conjg( sum S(k,j)*x(k) ) - b*conjg( sum P(k,j)*x(k) ) + * k=je k=je + * + * which may cause underflow problems if A or B are close +@@ -659,15 +661,15 @@ + *$PL$ CMCHAR='*' + * + DO 110 JA = 1, NA +- SUMA( JA, JW ) = ZERO +- SUMB( JA, JW ) = ZERO ++ SUMS( JA, JW ) = ZERO ++ SUMP( JA, JW ) = ZERO + * + DO 100 JR = JE, J - 1 +- SUMA( JA, JW ) = SUMA( JA, JW ) + +- $ A( JR, J+JA-1 )* ++ SUMS( JA, JW ) = SUMS( JA, JW ) + ++ $ S( JR, J+JA-1 )* + $ WORK( ( JW+1 )*N+JR ) +- SUMB( JA, JW ) = SUMB( JA, JW ) + +- $ B( JR, J+JA-1 )* ++ SUMP( JA, JW ) = SUMP( JA, JW ) + ++ $ P( JR, J+JA-1 )* + $ WORK( ( JW+1 )*N+JR ) + 100 CONTINUE + 110 CONTINUE +@@ -687,15 +689,15 @@ + * + DO 130 JA = 1, NA + IF( ILCPLX ) THEN +- SUM( JA, 1 ) = -ACOEF*SUMA( JA, 1 ) + +- $ BCOEFR*SUMB( JA, 1 ) - +- $ BCOEFI*SUMB( JA, 2 ) +- SUM( JA, 2 ) = -ACOEF*SUMA( JA, 2 ) + +- $ BCOEFR*SUMB( JA, 2 ) + +- $ BCOEFI*SUMB( JA, 1 ) ++ SUM( JA, 1 ) = -ACOEF*SUMS( JA, 1 ) + ++ $ BCOEFR*SUMP( JA, 1 ) - ++ $ BCOEFI*SUMP( JA, 2 ) ++ SUM( JA, 2 ) = -ACOEF*SUMS( JA, 2 ) + ++ $ BCOEFR*SUMP( JA, 2 ) + ++ $ BCOEFI*SUMP( JA, 1 ) + ELSE +- SUM( JA, 1 ) = -ACOEF*SUMA( JA, 1 ) + +- $ BCOEFR*SUMB( JA, 1 ) ++ SUM( JA, 1 ) = -ACOEF*SUMS( JA, 1 ) + ++ $ BCOEFR*SUMP( JA, 1 ) + END IF + 130 CONTINUE + * +@@ -703,7 +705,7 @@ + * Solve ( a A - b B ) y = SUM(,) + * with scaling and perturbation of the denominator + * +- CALL DLALN2( .TRUE., NA, NW, DMIN, ACOEF, A( J, J ), LDA, ++ CALL DLALN2( .TRUE., NA, NW, DMIN, ACOEF, S( J, J ), LDS, + $ BDIAG( 1 ), BDIAG( 2 ), SUM, 2, BCOEFR, + $ BCOEFI, WORK( 2*N+J ), N, SCALE, TEMP, + $ IINFO ) +@@ -790,7 +792,7 @@ 
+ END IF + NW = 1 + IF( JE.GT.1 ) THEN +- IF( A( JE, JE-1 ).NE.ZERO ) THEN ++ IF( S( JE, JE-1 ).NE.ZERO ) THEN + ILCPLX = .TRUE. + NW = 2 + END IF +@@ -809,8 +811,8 @@ + * (c) complex eigenvalue. + * + IF( .NOT.ILCPLX ) THEN +- IF( ABS( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( B( JE, JE ) ).LE.SAFMIN ) THEN ++ IF( ABS( S( JE, JE ) ).LE.SAFMIN .AND. ++ $ ABS( P( JE, JE ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- unit eigenvector + * +@@ -839,10 +841,10 @@ + * + * Real eigenvalue + * +- TEMP = ONE / MAX( ABS( A( JE, JE ) )*ASCALE, +- $ ABS( B( JE, JE ) )*BSCALE, SAFMIN ) +- SALFAR = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*B( JE, JE ) )*BSCALE ++ TEMP = ONE / MAX( ABS( S( JE, JE ) )*ASCALE, ++ $ ABS( P( JE, JE ) )*BSCALE, SAFMIN ) ++ SALFAR = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*P( JE, JE ) )*BSCALE + ACOEF = SBETA*ASCALE + BCOEFR = SALFAR*BSCALE + BCOEFI = ZERO +@@ -885,14 +887,14 @@ + * (See "Further Details", above.) + * + DO 260 JR = 1, JE - 1 +- WORK( 2*N+JR ) = BCOEFR*B( JR, JE ) - +- $ ACOEF*A( JR, JE ) ++ WORK( 2*N+JR ) = BCOEFR*P( JR, JE ) - ++ $ ACOEF*S( JR, JE ) + 260 CONTINUE + ELSE + * + * Complex eigenvalue + * +- CALL DLAG2( A( JE-1, JE-1 ), LDA, B( JE-1, JE-1 ), LDB, ++ CALL DLAG2( S( JE-1, JE-1 ), LDS, P( JE-1, JE-1 ), LDP, + $ SAFMIN*SAFETY, ACOEF, TEMP, BCOEFR, TEMP2, + $ BCOEFI ) + IF( BCOEFI.EQ.ZERO ) THEN +@@ -924,9 +926,9 @@ + * Compute first two components of eigenvector + * and contribution to sums + * +- TEMP = ACOEF*A( JE, JE-1 ) +- TEMP2R = ACOEF*A( JE, JE ) - BCOEFR*B( JE, JE ) +- TEMP2I = -BCOEFI*B( JE, JE ) ++ TEMP = ACOEF*S( JE, JE-1 ) ++ TEMP2R = ACOEF*S( JE, JE ) - BCOEFR*P( JE, JE ) ++ TEMP2I = -BCOEFI*P( JE, JE ) + IF( ABS( TEMP ).GE.ABS( TEMP2R )+ABS( TEMP2I ) ) THEN + WORK( 2*N+JE ) = ONE + WORK( 3*N+JE ) = ZERO +@@ -935,10 +937,10 @@ + ELSE + WORK( 2*N+JE-1 ) = ONE + WORK( 3*N+JE-1 ) = ZERO +- TEMP = ACOEF*A( JE-1, JE ) +- WORK( 2*N+JE ) = ( BCOEFR*B( JE-1, JE-1 )-ACOEF* +- $ A( JE-1, JE-1 ) ) / TEMP +- 
WORK( 3*N+JE ) = BCOEFI*B( JE-1, JE-1 ) / TEMP ++ TEMP = ACOEF*S( JE-1, JE ) ++ WORK( 2*N+JE ) = ( BCOEFR*P( JE-1, JE-1 )-ACOEF* ++ $ S( JE-1, JE-1 ) ) / TEMP ++ WORK( 3*N+JE ) = BCOEFI*P( JE-1, JE-1 ) / TEMP + END IF + * + XMAX = MAX( ABS( WORK( 2*N+JE ) )+ABS( WORK( 3*N+JE ) ), +@@ -958,12 +960,12 @@ + CRE2B = BCOEFR*WORK( 2*N+JE ) - BCOEFI*WORK( 3*N+JE ) + CIM2B = BCOEFI*WORK( 2*N+JE ) + BCOEFR*WORK( 3*N+JE ) + DO 270 JR = 1, JE - 2 +- WORK( 2*N+JR ) = -CREALA*A( JR, JE-1 ) + +- $ CREALB*B( JR, JE-1 ) - +- $ CRE2A*A( JR, JE ) + CRE2B*B( JR, JE ) +- WORK( 3*N+JR ) = -CIMAGA*A( JR, JE-1 ) + +- $ CIMAGB*B( JR, JE-1 ) - +- $ CIM2A*A( JR, JE ) + CIM2B*B( JR, JE ) ++ WORK( 2*N+JR ) = -CREALA*S( JR, JE-1 ) + ++ $ CREALB*P( JR, JE-1 ) - ++ $ CRE2A*S( JR, JE ) + CRE2B*P( JR, JE ) ++ WORK( 3*N+JR ) = -CIMAGA*S( JR, JE-1 ) + ++ $ CIMAGB*P( JR, JE-1 ) - ++ $ CIM2A*S( JR, JE ) + CIM2B*P( JR, JE ) + 270 CONTINUE + END IF + * +@@ -978,23 +980,23 @@ + * next iteration to process it (when it will be j:j+1) + * + IF( .NOT.IL2BY2 .AND. J.GT.1 ) THEN +- IF( A( J, J-1 ).NE.ZERO ) THEN ++ IF( S( J, J-1 ).NE.ZERO ) THEN + IL2BY2 = .TRUE. 
+ GO TO 370 + END IF + END IF +- BDIAG( 1 ) = B( J, J ) ++ BDIAG( 1 ) = P( J, J ) + IF( IL2BY2 ) THEN + NA = 2 +- BDIAG( 2 ) = B( J+1, J+1 ) ++ BDIAG( 2 ) = P( J+1, J+1 ) + ELSE + NA = 1 + END IF + * + * Compute x(j) (and x(j+1), if 2-by-2 block) + * +- CALL DLALN2( .FALSE., NA, NW, DMIN, ACOEF, A( J, J ), +- $ LDA, BDIAG( 1 ), BDIAG( 2 ), WORK( 2*N+J ), ++ CALL DLALN2( .FALSE., NA, NW, DMIN, ACOEF, S( J, J ), ++ $ LDS, BDIAG( 1 ), BDIAG( 2 ), WORK( 2*N+J ), + $ N, BCOEFR, BCOEFI, SUM, 2, SCALE, TEMP, + $ IINFO ) + IF( SCALE.LT.ONE ) THEN +@@ -1014,7 +1016,7 @@ + 300 CONTINUE + 310 CONTINUE + * +-* w = w + x(j)*(a A(*,j) - b B(*,j) ) with scaling ++* w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling + * + IF( J.GT.1 ) THEN + * +@@ -1052,19 +1054,19 @@ + $ BCOEFR*WORK( 3*N+J+JA-1 ) + DO 340 JR = 1, J - 1 + WORK( 2*N+JR ) = WORK( 2*N+JR ) - +- $ CREALA*A( JR, J+JA-1 ) + +- $ CREALB*B( JR, J+JA-1 ) ++ $ CREALA*S( JR, J+JA-1 ) + ++ $ CREALB*P( JR, J+JA-1 ) + WORK( 3*N+JR ) = WORK( 3*N+JR ) - +- $ CIMAGA*A( JR, J+JA-1 ) + +- $ CIMAGB*B( JR, J+JA-1 ) ++ $ CIMAGA*S( JR, J+JA-1 ) + ++ $ CIMAGB*P( JR, J+JA-1 ) + 340 CONTINUE + ELSE + CREALA = ACOEF*WORK( 2*N+J+JA-1 ) + CREALB = BCOEFR*WORK( 2*N+J+JA-1 ) + DO 350 JR = 1, J - 1 + WORK( 2*N+JR ) = WORK( 2*N+JR ) - +- $ CREALA*A( JR, J+JA-1 ) + +- $ CREALB*B( JR, J+JA-1 ) ++ $ CREALA*S( JR, J+JA-1 ) + ++ $ CREALB*P( JR, J+JA-1 ) + 350 CONTINUE + END IF + 360 CONTINUE +diff -uNr LAPACK.orig/SRC/dtrevc.f LAPACK/SRC/dtrevc.f +--- LAPACK.orig/SRC/dtrevc.f Thu Nov 4 14:24:59 1999 ++++ LAPACK/SRC/dtrevc.f Fri May 25 16:13:52 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 7, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER HOWMNY, SIDE +@@ -21,28 +21,23 @@ + * + * DTREVC computes some or all of the right and/or left eigenvectors of + * a real upper quasi-triangular matrix T. +-* ++* Matrices of this type are produced by the Schur factorization of ++* a real general matrix: A = Q*T*Q**T, as computed by DHSEQR. ++* + * The right eigenvector x and the left eigenvector y of T corresponding + * to an eigenvalue w are defined by: +-* +-* T*x = w*x, y'*T = w*y' +-* +-* where y' denotes the conjugate transpose of the vector y. +-* +-* If all eigenvectors are requested, the routine may either return the +-* matrices X and/or Y of right or left eigenvectors of T, or the +-* products Q*X and/or Q*Y, where Q is an input orthogonal +-* matrix. If T was obtained from the real-Schur factorization of an +-* original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of +-* right or left eigenvectors of A. +-* +-* T must be in Schur canonical form (as returned by DHSEQR), that is, +-* block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each +-* 2-by-2 diagonal block has its diagonal elements equal and its +-* off-diagonal elements of opposite sign. Corresponding to each 2-by-2 +-* diagonal block is a complex conjugate pair of eigenvalues and +-* eigenvectors; only one eigenvector of the pair is computed, namely +-* the one corresponding to the eigenvalue with positive imaginary part. ++* ++* T*x = w*x, (y**H)*T = w*(y**H) ++* ++* where y**H denotes the conjugate transpose of y. ++* The eigenvalues are not input to this routine, but are read directly ++* from the diagonal blocks of T. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an ++* input matrix. If Q is the orthogonal factor that reduces a matrix ++* A to Schur form T, then Q*X and Q*Y are the matrices of right and ++* left eigenvectors of A. 
+ * + * Arguments + * ========= +@@ -55,21 +50,21 @@ + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; + * = 'B': compute all right and/or left eigenvectors, +-* and backtransform them using the input matrices +-* supplied in VR and/or VL; ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, +-* specified by the logical array SELECT. ++* as indicated by the logical array SELECT. + * + * SELECT (input/output) LOGICAL array, dimension (N) + * If HOWMNY = 'S', SELECT specifies the eigenvectors to be + * computed. +-* If HOWMNY = 'A' or 'B', SELECT is not referenced. +-* To select the real eigenvector corresponding to a real +-* eigenvalue w(j), SELECT(j) must be set to .TRUE.. To select +-* the complex eigenvector corresponding to a complex conjugate +-* pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must be +-* set to .TRUE.; then on exit SELECT(j) is .TRUE. and +-* SELECT(j+1) is .FALSE.. ++* If w(j) is a real eigenvalue, the corresponding real ++* eigenvector is computed if SELECT(j) is .TRUE.. ++* If w(j) and w(j+1) are the real and imaginary parts of a ++* complex eigenvalue, the corresponding complex eigenvector is ++* computed if either SELECT(j) or SELECT(j+1) is .TRUE., and ++* on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is set to ++* .FALSE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER + * The order of the matrix T. N >= 0. +@@ -86,15 +81,6 @@ + * of Schur vectors returned by DHSEQR). + * On exit, if SIDE = 'L' or 'B', VL contains: + * if HOWMNY = 'A', the matrix Y of left eigenvectors of T; +-* VL has the same quasi-lower triangular form +-* as T'. If T(i,i) is a real eigenvalue, then +-* the i-th column VL(i) of VL is its +-* corresponding eigenvector. 
If T(i:i+1,i:i+1) +-* is a 2-by-2 block whose eigenvalues are +-* complex-conjugate eigenvalues of T, then +-* VL(i)+sqrt(-1)*VL(i+1) is the complex +-* eigenvector corresponding to the eigenvalue +-* with positive real part. + * if HOWMNY = 'B', the matrix Q*Y; + * if HOWMNY = 'S', the left eigenvectors of T specified by + * SELECT, stored consecutively in the columns +@@ -103,11 +89,11 @@ + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part, and the second the imaginary part. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of the array VL. LDVL >= max(1,N) if +-* SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of the array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. + * + * VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +@@ -115,15 +101,6 @@ + * of Schur vectors returned by DHSEQR). + * On exit, if SIDE = 'R' or 'B', VR contains: + * if HOWMNY = 'A', the matrix X of right eigenvectors of T; +-* VR has the same quasi-upper triangular form +-* as T. If T(i,i) is a real eigenvalue, then +-* the i-th column VR(i) of VR is its +-* corresponding eigenvector. If T(i:i+1,i:i+1) +-* is a 2-by-2 block whose eigenvalues are +-* complex-conjugate eigenvalues of T, then +-* VR(i)+sqrt(-1)*VR(i+1) is the complex +-* eigenvector corresponding to the eigenvalue +-* with positive real part. + * if HOWMNY = 'B', the matrix Q*X; + * if HOWMNY = 'S', the right eigenvectors of T specified by + * SELECT, stored consecutively in the columns +@@ -132,11 +109,11 @@ + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part and the second the imaginary part. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. 
+ * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. LDVR >= max(1,N) if +-* SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +diff -uNr LAPACK.orig/SRC/dtrsen.f LAPACK/SRC/dtrsen.f +--- LAPACK.orig/SRC/dtrsen.f Thu Nov 4 14:24:59 1999 ++++ LAPACK/SRC/dtrsen.f Fri May 25 16:14:10 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* January 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, JOB +@@ -118,8 +118,8 @@ + * LWORK (input) INTEGER + * The dimension of the array WORK. + * If JOB = 'N', LWORK >= max(1,N); +-* if JOB = 'E', LWORK >= M*(N-M); +-* if JOB = 'V' or 'B', LWORK >= 2*M*(N-M). ++* if JOB = 'E', LWORK >= max(1,M*(N-M)); ++* if JOB = 'V' or 'B', LWORK >= max(1,2*M*(N-M)). + * + * If LWORK = -1, then a workspace query is assumed; the routine + * only calculates the optimal size of the WORK array, returns +@@ -127,12 +127,12 @@ + * message related to LWORK is issued by XERBLA. + * + * IWORK (workspace) INTEGER array, dimension (LIWORK) +-* IF JOB = 'N' or 'E', IWORK is not referenced. ++* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. + * + * LIWORK (input) INTEGER + * The dimension of the array IWORK. + * If JOB = 'N' or 'E', LIWORK >= 1; +-* if JOB = 'V' or 'B', LIWORK >= M*(N-M). ++* if JOB = 'V' or 'B', LIWORK >= max(1,M*(N-M)). + * + * If LIWORK = -1, then a workspace query is assumed; the + * routine only calculates the optimal size of the IWORK array, +diff -uNr LAPACK.orig/SRC/sbdsqr.f LAPACK/SRC/sbdsqr.f +--- LAPACK.orig/SRC/sbdsqr.f Thu Nov 4 14:25:42 1999 ++++ LAPACK/SRC/sbdsqr.f Fri May 25 15:58:54 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. 
of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO +@@ -18,14 +18,26 @@ + * Purpose + * ======= + * +-* SBDSQR computes the singular value decomposition (SVD) of a real +-* N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P' +-* denotes the transpose of P), where S is a diagonal matrix with +-* non-negative diagonal elements (the singular values of B), and Q +-* and P are orthogonal matrices. +-* +-* The routine computes S, and optionally computes U * Q, P' * VT, +-* or Q' * C, for given real input matrices U, VT, and C. ++* SBDSQR computes the singular values and, optionally, the right and/or ++* left singular vectors from the singular value decomposition (SVD) of ++* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit ++* zero-shift QR algorithm. The SVD of B has the form ++* ++* B = Q * S * P**T ++* ++* where S is the diagonal matrix of singular values, Q is an orthogonal ++* matrix of left singular vectors, and P is an orthogonal matrix of ++* right singular vectors. If left singular vectors are requested, this ++* subroutine actually returns U*Q instead of Q, and, if right singular ++* vectors are requested, this subroutine returns P**T*VT instead of ++* P**T, for given real input matrices U and VT. When U and VT are the ++* orthogonal matrices that reduce a general matrix A to bidiagonal ++* form: A = U*B*VT, as computed by SGEBRD, then ++* ++* A = (U*Q) * S * (P**T*VT) ++* ++* is the SVD of A. Optionally, the subroutine may also compute Q**T*C ++* for a given real input matrix C. + * + * See "Computing Small Singular Values of Bidiagonal Matrices With + * Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, +@@ -61,18 +73,17 @@ + * order. 
+ * + * E (input/output) REAL array, dimension (N) +-* On entry, the elements of E contain the +-* offdiagonal elements of the bidiagonal matrix whose SVD +-* is desired. On normal exit (INFO = 0), E is destroyed. +-* If the algorithm does not converge (INFO > 0), D and E ++* On entry, the N-1 offdiagonal elements of the bidiagonal ++* matrix B. ++* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E + * will contain the diagonal and superdiagonal elements of a + * bidiagonal matrix orthogonally equivalent to the one given + * as input. E(N) is used for workspace. + * + * VT (input/output) REAL array, dimension (LDVT, NCVT) + * On entry, an N-by-NCVT matrix VT. +-* On exit, VT is overwritten by P' * VT. +-* VT is not referenced if NCVT = 0. ++* On exit, VT is overwritten by P**T * VT. ++* Not referenced if NCVT = 0. + * + * LDVT (input) INTEGER + * The leading dimension of the array VT. +@@ -81,21 +92,22 @@ + * U (input/output) REAL array, dimension (LDU, N) + * On entry, an NRU-by-N matrix U. + * On exit, U is overwritten by U * Q. +-* U is not referenced if NRU = 0. ++* Not referenced if NRU = 0. + * + * LDU (input) INTEGER + * The leading dimension of the array U. LDU >= max(1,NRU). + * + * C (input/output) REAL array, dimension (LDC, NCC) + * On entry, an N-by-NCC matrix C. +-* On exit, C is overwritten by Q' * C. +-* C is not referenced if NCC = 0. ++* On exit, C is overwritten by Q**T * C. ++* Not referenced if NCC = 0. + * + * LDC (input) INTEGER + * The leading dimension of the array C. + * LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. 
+ * +-* WORK (workspace) REAL array, dimension (4*N) ++* WORK (workspace) REAL array, dimension (2*N) ++* if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise + * + * INFO (output) INTEGER + * = 0: successful exit +diff -uNr LAPACK.orig/SRC/sgebd2.f LAPACK/SRC/sgebd2.f +--- LAPACK.orig/SRC/sgebd2.f Thu Nov 4 14:23:33 1999 ++++ LAPACK/SRC/sgebd2.f Fri May 25 15:59:24 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* February 29, 1992 ++* May 7, 2001 + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, M, N +@@ -169,8 +169,9 @@ + * + * Apply H(i) to A(i:m,i+1:n) from the left + * +- CALL SLARF( 'Left', M-I+1, N-I, A( I, I ), 1, TAUQ( I ), +- $ A( I, I+1 ), LDA, WORK ) ++ IF( I.LT.N ) ++ $ CALL SLARF( 'Left', M-I+1, N-I, A( I, I ), 1, TAUQ( I ), ++ $ A( I, I+1 ), LDA, WORK ) + A( I, I ) = D( I ) + * + IF( I.LT.N ) THEN +@@ -207,8 +208,9 @@ + * + * Apply G(i) to A(i+1:m,i:n) from the right + * +- CALL SLARF( 'Right', M-I, N-I+1, A( I, I ), LDA, TAUP( I ), +- $ A( MIN( I+1, M ), I ), LDA, WORK ) ++ IF( I.LT.M ) ++ $ CALL SLARF( 'Right', M-I, N-I+1, A( I, I ), LDA, ++ $ TAUP( I ), A( MIN( I+1, M ), I ), LDA, WORK ) + A( I, I ) = D( I ) + * + IF( I.LT.M ) THEN +diff -uNr LAPACK.orig/SRC/sgees.f LAPACK/SRC/sgees.f +--- LAPACK.orig/SRC/sgees.f Thu Nov 4 14:23:33 1999 ++++ LAPACK/SRC/sgees.f Fri May 25 15:59:45 2001 +@@ -5,6 +5,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVS, SORT +@@ -110,10 +111,9 @@ + * The dimension of the array WORK. LWORK >= max(1,3*N). + * For good performance, LWORK must generally be larger. 
+ * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * BWORK (workspace) LOGICAL array, dimension (N) + * Not referenced if SORT = 'N'. +@@ -138,12 +138,13 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + * .. + * .. Local Scalars .. +- LOGICAL CURSL, LASTSL, LQUERY, LST2SL, SCALEA, WANTST, +- $ WANTVS ++ LOGICAL CURSL, LASTSL, LST2SL, SCALEA, WANTST, WANTVS + INTEGER HSWORK, I, I1, I2, IBAL, ICOND, IERR, IEVAL, + $ IHI, ILO, INXT, IP, ITAU, IWRK, K, MAXB, + $ MAXWRK, MINWRK +@@ -171,7 +172,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVS = LSAME( JOBVS, 'V' ) + WANTST = LSAME( SORT, 'S' ) + IF( ( .NOT.WANTVS ) .AND. ( .NOT.LSAME( JOBVS, 'N' ) ) ) THEN +@@ -197,7 +197,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 2*N + N*ILAENV( 1, 'SGEHRD', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 3*N ) + IF( .NOT.WANTVS ) THEN +@@ -216,19 +216,17 @@ + MAXWRK = MAX( MAXWRK, N+HSWORK, 1 ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -13 + END IF +- IF( LWORK.LT.MINWRK .AND. 
.NOT.LQUERY ) THEN +- INFO = -13 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGEES ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/sgeesx.f LAPACK/SRC/sgeesx.f +--- LAPACK.orig/SRC/sgeesx.f Thu Nov 4 14:23:34 1999 ++++ LAPACK/SRC/sgeesx.f Fri May 25 16:00:09 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Do WS calculations if LWORK = -1 (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVS, SENSE, SORT +@@ -140,6 +141,10 @@ + * N+2*SDIM*(N-SDIM) <= N+N*N/2. + * For good performance, LWORK must generally be larger. + * ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. ++* + * IWORK (workspace/output) INTEGER array, dimension (LIWORK) + * Not referenced if SENSE = 'N' or 'E'. + * On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. +@@ -171,6 +176,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + * .. +@@ -239,7 +246,7 @@ + * in the code.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. LWORK.GE.1 ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 2*N + N*ILAENV( 1, 'SGEHRD', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 3*N ) + IF( .NOT.WANTVS ) THEN +@@ -257,21 +264,24 @@ + HSWORK = MAX( K*( K+2 ), 2*N ) + MAXWRK = MAX( MAXWRK, N+HSWORK, 1 ) + END IF ++* ++* Estimate the workspace needed by STRSEN. ++* ++ IF( WANTST ) THEN ++ MAXWRK = MAX( MAXWRK, N+(N*N+1)/2 ) ++ END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. 
LWORK.NE.LQUERV ) ++ $ INFO = -16 + END IF +- IF( LWORK.LT.MINWRK ) THEN +- INFO = -16 +- END IF +- IF( LIWORK.LT.1 ) THEN +- INFO = -18 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGEESX', -INFO ) + RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/sgeev.f LAPACK/SRC/sgeev.f +--- LAPACK.orig/SRC/sgeev.f Wed Dec 8 16:00:09 1999 ++++ LAPACK/SRC/sgeev.f Fri May 25 16:00:38 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* December 8, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -98,10 +99,9 @@ + * if JOBVL = 'V' or JOBVR = 'V', LWORK >= 4*N. For good + * performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * INFO (output) INTEGER + * = 0: successful exit +@@ -114,11 +114,13 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + * .. + * .. Local Scalars .. 
+- LOGICAL LQUERY, SCALEA, WANTVL, WANTVR ++ LOGICAL SCALEA, WANTVL, WANTVR + CHARACTER SIDE + INTEGER HSWORK, I, IBAL, IERR, IHI, ILO, ITAU, IWRK, K, + $ MAXB, MAXWRK, MINWRK, NOUT +@@ -149,7 +151,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVL = LSAME( JOBVL, 'V' ) + WANTVR = LSAME( JOBVR, 'V' ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.LSAME( JOBVL, 'N' ) ) ) THEN +@@ -177,7 +178,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 2*N + N*ILAENV( 1, 'SGEHRD', ' ', N, 1, N, 0 ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.WANTVR ) ) THEN + MINWRK = MAX( 1, 3*N ) +@@ -198,19 +199,17 @@ + MAXWRK = MAX( MAXWRK, 4*N ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -13 + END IF +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -13 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGEEV ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/sgeevx.f LAPACK/SRC/sgeevx.f +--- LAPACK.orig/SRC/sgeevx.f Thu Nov 4 14:23:34 1999 ++++ LAPACK/SRC/sgeevx.f Fri May 25 16:00:59 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER BALANC, JOBVL, JOBVR, SENSE +@@ -179,10 +180,9 @@ + * LWORK >= 3*N. If SENSE = 'V' or 'B', LWORK >= N*(N+6). + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. 
++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * IWORK (workspace) INTEGER array, dimension (2*N-2) + * If SENSE = 'N' or 'E', not referenced. +@@ -198,12 +198,14 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, SCALEA, WANTVL, WANTVR, WNTSNB, WNTSNE, +- $ WNTSNN, WNTSNV ++ LOGICAL SCALEA, WANTVL, WANTVR, WNTSNB, WNTSNE, WNTSNN, ++ $ WNTSNV + CHARACTER JOB, SIDE + INTEGER HSWORK, I, ICOND, IERR, ITAU, IWRK, K, MAXB, + $ MAXWRK, MINWRK, NOUT +@@ -234,7 +236,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVL = LSAME( JOBVL, 'V' ) + WANTVR = LSAME( JOBVR, 'V' ) + WNTSNN = LSAME( SENSE, 'N' ) +@@ -273,7 +274,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'SGEHRD', ' ', N, 1, N, 0 ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.WANTVR ) ) THEN + MINWRK = MAX( 1, 2*N ) +@@ -307,19 +308,17 @@ + MAXWRK = MAX( MAXWRK, 3*N, 1 ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -21 + END IF +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -21 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGEEVX', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/sgegs.f LAPACK/SRC/sgegs.f +--- LAPACK.orig/SRC/sgegs.f Thu Nov 4 14:23:34 1999 ++++ LAPACK/SRC/sgegs.f Fri May 25 16:01:48 2001 +@@ -5,7 +5,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR +@@ -22,105 +22,75 @@ + * + * This routine is deprecated and has been replaced by routine SGGES. + * +-* SGEGS computes for a pair of N-by-N real nonsymmetric matrices A, B: +-* the generalized eigenvalues (alphar +/- alphai*i, beta), the real +-* Schur form (A, B), and optionally left and/or right Schur vectors +-* (VSL and VSR). +-* +-* (If only the generalized eigenvalues are needed, use the driver SGEGV +-* instead.) +-* +-* A generalized eigenvalue for a pair of matrices (A,B) is, roughly +-* speaking, a scalar w or a ratio alpha/beta = w, such that A - w*B +-* is singular. It is usually represented as the pair (alpha,beta), +-* as there is a reasonable interpretation for beta=0, and even for +-* both being zero. A good beginning reference is the book, "Matrix +-* Computations", by G. Golub & C. van Loan (Johns Hopkins U. Press) +-* +-* The (generalized) Schur form of a pair of matrices is the result of +-* multiplying both matrices on the left by one orthogonal matrix and +-* both on the right by another orthogonal matrix, these two orthogonal +-* matrices being chosen so as to bring the pair of matrices into +-* (real) Schur form. +-* +-* A pair of matrices A, B is in generalized real Schur form if B is +-* upper triangular with non-negative diagonal and A is block upper +-* triangular with 1-by-1 and 2-by-2 blocks. 1-by-1 blocks correspond +-* to real generalized eigenvalues, while 2-by-2 blocks of A will be +-* "standardized" by making the corresponding elements of B have the +-* form: +-* [ a 0 ] +-* [ 0 b ] +-* +-* and the pair of corresponding 2-by-2 blocks in A and B will +-* have a complex conjugate pair of generalized eigenvalues. 
+-* +-* The left and right Schur vectors are the columns of VSL and VSR, +-* respectively, where VSL and VSR are the orthogonal matrices +-* which reduce A and B to Schur form: +-* +-* Schur form of (A,B) = ( (VSL)**T A (VSR), (VSL)**T B (VSR) ) ++* SGEGS computes the eigenvalues, real Schur form, and, optionally, ++* left and or/right Schur vectors of a real matrix pair (A,B). ++* Given two square matrices A and B, the generalized real Schur ++* factorization has the form ++* ++* A = Q*S*Z**T, B = Q*T*Z**T ++* ++* where Q and Z are orthogonal matrices, T is upper triangular, and S ++* is an upper quasi-triangular matrix with 1-by-1 and 2-by-2 diagonal ++* blocks, the 2-by-2 blocks corresponding to complex conjugate pairs ++* of eigenvalues of (A,B). The columns of Q are the left Schur vectors ++* and the columns of Z are the right Schur vectors. ++* ++* If only the eigenvalues of (A,B) are needed, the driver routine ++* SGEGV should be used instead. See SGEGV for a description of the ++* eigenvalues of the generalized nonsymmetric eigenvalue problem ++* (GNEP). + * + * Arguments + * ========= + * + * JOBVSL (input) CHARACTER*1 + * = 'N': do not compute the left Schur vectors; +-* = 'V': compute the left Schur vectors. ++* = 'V': compute the left Schur vectors (returned in VSL). + * + * JOBVSR (input) CHARACTER*1 + * = 'N': do not compute the right Schur vectors; +-* = 'V': compute the right Schur vectors. ++* = 'V': compute the right Schur vectors (returned in VSR). + * + * N (input) INTEGER + * The order of the matrices A, B, VSL, and VSR. N >= 0. + * + * A (input/output) REAL array, dimension (LDA, N) +-* On entry, the first of the pair of matrices whose generalized +-* eigenvalues and (optionally) Schur vectors are to be +-* computed. +-* On exit, the generalized Schur form of A. +-* Note: to avoid overflow, the Frobenius norm of the matrix +-* A should be less than the overflow threshold. ++* On entry, the matrix A. 
++* On exit, the upper quasi-triangular matrix S from the ++* generalized real Schur factorization. + * + * LDA (input) INTEGER + * The leading dimension of A. LDA >= max(1,N). + * + * B (input/output) REAL array, dimension (LDB, N) +-* On entry, the second of the pair of matrices whose +-* generalized eigenvalues and (optionally) Schur vectors are +-* to be computed. +-* On exit, the generalized Schur form of B. +-* Note: to avoid overflow, the Frobenius norm of the matrix +-* B should be less than the overflow threshold. ++* On entry, the matrix B. ++* On exit, the upper triangular matrix T from the generalized ++* real Schur factorization. + * + * LDB (input) INTEGER + * The leading dimension of B. LDB >= max(1,N). + * + * ALPHAR (output) REAL array, dimension (N) ++* The real parts of each scalar alpha defining an eigenvalue ++* of GNEP. ++* + * ALPHAI (output) REAL array, dimension (N) +-* BETA (output) REAL array, dimension (N) +-* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will +-* be the generalized eigenvalues. ALPHAR(j) + ALPHAI(j)*i, +-* j=1,...,N and BETA(j),j=1,...,N are the diagonals of the +-* complex Schur form (A,B) that would result if the 2-by-2 +-* diagonal blocks of the real Schur form of (A,B) were further +-* reduced to triangular form using 2-by-2 complex unitary +-* transformations. If ALPHAI(j) is zero, then the j-th ++* The imaginary parts of each scalar alpha defining an ++* eigenvalue of GNEP. If ALPHAI(j) is zero, then the j-th + * eigenvalue is real; if positive, then the j-th and (j+1)-st +-* eigenvalues are a complex conjugate pair, with ALPHAI(j+1) +-* negative. ++* eigenvalues are a complex conjugate pair, with ++* ALPHAI(j+1) = -ALPHAI(j). + * +-* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) +-* may easily over- or underflow, and BETA(j) may even be zero. +-* Thus, the user should avoid naively computing the ratio +-* alpha/beta. 
However, ALPHAR and ALPHAI will be always less +-* than and usually comparable with norm(A) in magnitude, and +-* BETA always less than and usually comparable with norm(B). ++* BETA (output) REAL array, dimension (N) ++* The scalars beta that define the eigenvalues of GNEP. ++* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and ++* beta = BETA(j) represent the j-th eigenvalue of the matrix ++* pair (A,B), in one of the forms lambda = alpha/beta or ++* mu = beta/alpha. Since either lambda or mu may overflow, ++* they should not, in general, be computed. + * + * VSL (output) REAL array, dimension (LDVSL,N) +-* If JOBVSL = 'V', VSL will contain the left Schur vectors. +-* (See "Purpose", above.) ++* If JOBVSL = 'V', the matrix of left Schur vectors Q. + * Not referenced if JOBVSL = 'N'. + * + * LDVSL (input) INTEGER +@@ -128,8 +98,7 @@ + * if JOBVSL = 'V', LDVSL >= N. + * + * VSR (output) REAL array, dimension (LDVSR,N) +-* If JOBVSR = 'V', VSR will contain the right Schur vectors. +-* (See "Purpose", above.) ++* If JOBVSR = 'V', the matrix of right Schur vectors Z. + * Not referenced if JOBVSR = 'N'. + * + * LDVSR (input) INTEGER +diff -uNr LAPACK.orig/SRC/sgegv.f LAPACK/SRC/sgegv.f +--- LAPACK.orig/SRC/sgegv.f Thu Nov 4 14:25:42 1999 ++++ LAPACK/SRC/sgegv.f Fri May 25 16:02:12 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -21,23 +21,32 @@ + * + * This routine is deprecated and has been replaced by routine SGGEV. + * +-* SGEGV computes for a pair of n-by-n real nonsymmetric matrices A and +-* B, the generalized eigenvalues (alphar +/- alphai*i, beta), and +-* optionally, the left and/or right generalized eigenvectors (VL and +-* VR). 
+-* +-* A generalized eigenvalue for a pair of matrices (A,B) is, roughly +-* speaking, a scalar w or a ratio alpha/beta = w, such that A - w*B +-* is singular. It is usually represented as the pair (alpha,beta), +-* as there is a reasonable interpretation for beta=0, and even for +-* both being zero. A good beginning reference is the book, "Matrix +-* Computations", by G. Golub & C. van Loan (Johns Hopkins U. Press) +-* +-* A right generalized eigenvector corresponding to a generalized +-* eigenvalue w for a pair of matrices (A,B) is a vector r such +-* that (A - w B) r = 0 . A left generalized eigenvector is a vector +-* l such that l**H * (A - w B) = 0, where l**H is the +-* conjugate-transpose of l. ++* SGEGV computes the eigenvalues and, optionally, the left and/or right ++* eigenvectors of a real matrix pair (A,B). ++* Given two square matrices A and B, ++* the generalized nonsymmetric eigenvalue problem (GNEP) is to find the ++* eigenvalues lambda and corresponding (non-zero) eigenvectors x such ++* that ++* ++* A*x = lambda*B*x. ++* ++* An alternate form is to find the eigenvalues mu and corresponding ++* eigenvectors y such that ++* ++* mu*A*y = B*y. ++* ++* These two forms are equivalent with mu = 1/lambda and x = y if ++* neither lambda nor mu is zero. In order to deal with the case that ++* lambda or mu is zero or small, two values alpha and beta are returned ++* for each eigenvalue, such that lambda = alpha/beta and ++* mu = beta/alpha. ++* ++* The vectors x and y in the above equations are right eigenvectors of ++* the matrix pair (A,B). Vectors u and v satisfying ++* ++* u**H*A = lambda*u**H*B or mu*v**H*A = v**H*B ++* ++* are left eigenvectors of (A,B). + * + * Note: this routine performs "full balancing" on A and B -- see + * "Further Details", below. +@@ -47,63 +56,75 @@ + * + * JOBVL (input) CHARACTER*1 + * = 'N': do not compute the left generalized eigenvectors; +-* = 'V': compute the left generalized eigenvectors. 
++* = 'V': compute the left generalized eigenvectors (returned ++* in VL). + * + * JOBVR (input) CHARACTER*1 + * = 'N': do not compute the right generalized eigenvectors; +-* = 'V': compute the right generalized eigenvectors. ++* = 'V': compute the right generalized eigenvectors (returned ++* in VR). + * + * N (input) INTEGER + * The order of the matrices A, B, VL, and VR. N >= 0. + * + * A (input/output) REAL array, dimension (LDA, N) +-* On entry, the first of the pair of matrices whose +-* generalized eigenvalues and (optionally) generalized +-* eigenvectors are to be computed. +-* On exit, the contents will have been destroyed. (For a +-* description of the contents of A on exit, see "Further +-* Details", below.) ++* On entry, the matrix A. ++* If JOBVL = 'V' or JOBVR = 'V', then on exit A ++* contains the real Schur form of A from the generalized Schur ++* factorization of the pair (A,B) after balancing. ++* If no eigenvectors were computed, then only the diagonal ++* blocks from the Schur form will be correct. See SGGHRD and ++* SHGEQZ for details. + * + * LDA (input) INTEGER + * The leading dimension of A. LDA >= max(1,N). + * + * B (input/output) REAL array, dimension (LDB, N) +-* On entry, the second of the pair of matrices whose +-* generalized eigenvalues and (optionally) generalized +-* eigenvectors are to be computed. +-* On exit, the contents will have been destroyed. (For a +-* description of the contents of B on exit, see "Further +-* Details", below.) ++* On entry, the matrix B. ++* If JOBVL = 'V' or JOBVR = 'V', then on exit B contains the ++* upper triangular matrix obtained from B in the generalized ++* Schur factorization of the pair (A,B) after balancing. ++* If no eigenvectors were computed, then only those elements of ++* B corresponding to the diagonal blocks from the Schur form of ++* A will be correct. See SGGHRD and SHGEQZ for details. + * + * LDB (input) INTEGER + * The leading dimension of B. LDB >= max(1,N). 
+ * + * ALPHAR (output) REAL array, dimension (N) ++* The real parts of each scalar alpha defining an eigenvalue of ++* GNEP. ++* + * ALPHAI (output) REAL array, dimension (N) +-* BETA (output) REAL array, dimension (N) +-* On exit, (ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N, will +-* be the generalized eigenvalues. If ALPHAI(j) is zero, then +-* the j-th eigenvalue is real; if positive, then the j-th and ++* The imaginary parts of each scalar alpha defining an ++* eigenvalue of GNEP. If ALPHAI(j) is zero, then the j-th ++* eigenvalue is real; if positive, then the j-th and + * (j+1)-st eigenvalues are a complex conjugate pair, with +-* ALPHAI(j+1) negative. ++* ALPHAI(j+1) = -ALPHAI(j). + * +-* Note: the quotients ALPHAR(j)/BETA(j) and ALPHAI(j)/BETA(j) +-* may easily over- or underflow, and BETA(j) may even be zero. +-* Thus, the user should avoid naively computing the ratio +-* alpha/beta. However, ALPHAR and ALPHAI will be always less +-* than and usually comparable with norm(A) in magnitude, and +-* BETA always less than and usually comparable with norm(B). ++* BETA (output) REAL array, dimension (N) ++* The scalars beta that define the eigenvalues of GNEP. ++* ++* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and ++* beta = BETA(j) represent the j-th eigenvalue of the matrix ++* pair (A,B), in one of the forms lambda = alpha/beta or ++* mu = beta/alpha. Since either lambda or mu may overflow, ++* they should not, in general, be computed. + * + * VL (output) REAL array, dimension (LDVL,N) +-* If JOBVL = 'V', the left generalized eigenvectors. (See +-* "Purpose", above.) Real eigenvectors take one column, +-* complex take two columns, the first for the real part and +-* the second for the imaginary part. Complex eigenvectors +-* correspond to an eigenvalue with positive imaginary part. +-* Each eigenvector will be scaled so the largest component +-* will have abs(real part) + abs(imag. 
part) = 1, *except* +-* that for eigenvalues with alpha=beta=0, a zero vector will +-* be returned as the corresponding eigenvector. ++* If JOBVL = 'V', the left eigenvectors u(j) are stored ++* in the columns of VL, in the same order as their eigenvalues. ++* If the j-th eigenvalue is real, then u(j) = VL(:,j). ++* If the j-th and (j+1)-st eigenvalues form a complex conjugate ++* pair, then ++* u(j) = VL(:,j) + i*VL(:,j+1) ++* and ++* u(j+1) = VL(:,j) - i*VL(:,j+1). ++* ++* Each eigenvector is scaled so that its largest component has ++* abs(real part) + abs(imag. part) = 1, except for eigenvectors ++* corresponding to an eigenvalue with alpha = beta = 0, which ++* are set to zero. + * Not referenced if JOBVL = 'N'. + * + * LDVL (input) INTEGER +@@ -111,15 +132,19 @@ + * if JOBVL = 'V', LDVL >= N. + * + * VR (output) REAL array, dimension (LDVR,N) +-* If JOBVR = 'V', the right generalized eigenvectors. (See +-* "Purpose", above.) Real eigenvectors take one column, +-* complex take two columns, the first for the real part and +-* the second for the imaginary part. Complex eigenvectors +-* correspond to an eigenvalue with positive imaginary part. +-* Each eigenvector will be scaled so the largest component +-* will have abs(real part) + abs(imag. part) = 1, *except* +-* that for eigenvalues with alpha=beta=0, a zero vector will +-* be returned as the corresponding eigenvector. ++* If JOBVR = 'V', the right eigenvectors x(j) are stored ++* in the columns of VR, in the same order as their eigenvalues. ++* If the j-th eigenvalue is real, then x(j) = VR(:,j). ++* If the j-th and (j+1)-st eigenvalues form a complex conjugate ++* pair, then ++* x(j) = VR(:,j) + i*VR(:,j+1) ++* and ++* x(j+1) = VR(:,j) - i*VR(:,j+1). ++* ++* Each eigenvector is scaled so that its largest component has ++* abs(real part) + abs(imag. part) = 1, except for eigenvalues ++* corresponding to an eigenvalue with alpha = beta = 0, which ++* are set to zero. + * Not referenced if JOBVR = 'N'. 
+ * + * LDVR (input) INTEGER +diff -uNr LAPACK.orig/SRC/sgelsd.f LAPACK/SRC/sgelsd.f +--- LAPACK.orig/SRC/sgelsd.f Thu Nov 4 14:26:24 1999 ++++ LAPACK/SRC/sgelsd.f Fri May 25 16:03:05 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -61,9 +62,10 @@ + * The number of right hand sides, i.e., the number of columns + * of the matrices B and X. NRHS >= 0. + * +-* A (input) REAL array, dimension (LDA,N) ++* A (input/output) REAL array, dimension (LDA,N) + * On entry, the M-by-N matrix A. +-* On exit, A has been destroyed. ++* On exit, the first min(m,n) rows of A are overwritten with ++* its right singular vectors, stored rowwise. + * + * LDA (input) INTEGER + * The leading dimension of the array A. LDA >= max(1,M). +@@ -95,24 +97,20 @@ + * On exit, if INFO = 0, WORK(1) returns the optimal LWORK. + * + * LWORK (input) INTEGER +-* The dimension of the array WORK. LWORK must be at least 1. ++* The dimension of the array WORK. LWORK >= 1. + * The exact minimum amount of workspace needed depends on M, +-* N and NRHS. As long as LWORK is at least +-* 12*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS + (SMLSIZ+1)**2, +-* if M is greater than or equal to N or +-* 12*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS + (SMLSIZ+1)**2, +-* if M is less than N, the code will execute correctly. ++* N and NRHS. ++* If M >= N, LWORK >= 11*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS. ++* If M < N, LWORK >= 11*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS. 
+ * SMLSIZ is returned by ILAENV and is equal to the maximum + * size of the subproblems at the bottom of the computation + * tree (usually about 25), and +-* NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) ++* NLVL = INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. +-* ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * IWORK (workspace) INTEGER array, dimension (LIWORK) + * LIWORK >= 3 * MINMN * NLVL + 11 * MINMN, +@@ -136,14 +134,15 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE, TWO + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0, TWO = 2.0E0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY + INTEGER IASCL, IBSCL, IE, IL, ITAU, ITAUP, ITAUQ, + $ LDWORK, MAXMN, MAXWRK, MINMN, MINWRK, MM, +- $ MNTHR, NLVL, NWORK, SMLSIZ, WLALSD ++ $ MNTHR, NLVL, NWORK, SMLSIZ + REAL ANRM, BIGNUM, BNRM, EPS, SFMIN, SMLNUM + * .. + * .. External Subroutines .. 
+@@ -166,7 +165,6 @@ + MINMN = MIN( M, N ) + MAXMN = MAX( M, N ) + MNTHR = ILAENV( 6, 'SGELSD', ' ', M, N, NRHS, -1 ) +- LQUERY = ( LWORK.EQ.-1 ) + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN +@@ -190,8 +188,8 @@ + * + MINWRK = 1 + MINMN = MAX( 1, MINMN ) +- NLVL = MAX( INT( LOG( REAL( MINMN ) / REAL( SMLSIZ+1 ) ) / +- $ LOG( TWO ) ) + 1, 0 ) ++ NLVL = INT( LOG( REAL( MINMN ) / REAL( SMLSIZ+1 ) ) / ++ $ LOG( TWO ) ) + 1 + * + IF( INFO.EQ.0 ) THEN + MAXWRK = 0 +@@ -216,12 +214,11 @@ + $ ILAENV( 1, 'SORMBR', 'QLT', MM, NRHS, N, -1 ) ) + MAXWRK = MAX( MAXWRK, 3*N+( N-1 )* + $ ILAENV( 1, 'SORMBR', 'PLN', N, NRHS, N, -1 ) ) +- WLALSD = 9*N+2*N*SMLSIZ+8*N*NLVL+N*NRHS+(SMLSIZ+1)**2 +- MAXWRK = MAX( MAXWRK, 3*N+WLALSD ) +- MINWRK = MAX( 3*N+MM, 3*N+NRHS, 3*N+WLALSD ) ++ MAXWRK = MAX( MAXWRK, 3*N+8*N+2*N*SMLSIZ+8*N*NLVL+N*NRHS ) ++ MINWRK = MAX( 3*N+MM, 3*N+NRHS, ++ $ 3*N+8*N+2*N*SMLSIZ+8*N*NLVL+N*NRHS ) + END IF + IF( N.GT.M ) THEN +- WLALSD = 9*M+2*M*SMLSIZ+8*M*NLVL+M*NRHS+(SMLSIZ+1)**2 + IF( N.GE.MNTHR ) THEN + * + * Path 2a - underdetermined, with many more columns +@@ -241,7 +238,8 @@ + END IF + MAXWRK = MAX( MAXWRK, M+NRHS* + $ ILAENV( 1, 'SORMLQ', 'LT', N, NRHS, M, -1 ) ) +- MAXWRK = MAX( MAXWRK, M*M+4*M+WLALSD ) ++ MAXWRK = MAX( MAXWRK, M*M+4*M+8*M+2*M*SMLSIZ+8*M*NLVL+M* ++ $ NRHS ) + ELSE + * + * Path 2 - remaining underdetermined cases. +@@ -252,26 +250,25 @@ + $ ILAENV( 1, 'SORMBR', 'QLT', M, NRHS, N, -1 ) ) + MAXWRK = MAX( MAXWRK, 3*M+M* + $ ILAENV( 1, 'SORMBR', 'PLN', N, NRHS, M, -1 ) ) +- MAXWRK = MAX( MAXWRK, 3*M+WLALSD ) ++ MAXWRK = MAX( MAXWRK, 3*M+8*M+2*M*SMLSIZ+8*M*NLVL+M* ++ $ NRHS ) + END IF +- MINWRK = MAX( 3*M+NRHS, 3*M+M, 3*M+WLALSD ) ++ MINWRK = MAX( 3*M+NRHS, 3*M+M, ++ $ 3*M+8*M+2*M*SMLSIZ+8*M*NLVL+M*NRHS ) + END IF + MINWRK = MIN( MINWRK, MAXWRK ) + WORK( 1 ) = MAXWRK +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -12 +- END IF ++ IF( LWORK.LT.MINWRK .AND. 
LWORK.NE.LQUERV ) ++ $ INFO = -12 + END IF + * ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGELSD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- GO TO 10 + END IF +-* +-* Quick return if possible. +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( M.EQ.0 .OR. N.EQ.0 ) THEN + RANK = 0 + RETURN +diff -uNr LAPACK.orig/SRC/sgelss.f LAPACK/SRC/sgelss.f +--- LAPACK.orig/SRC/sgelss.f Thu Nov 4 14:23:34 1999 ++++ LAPACK/SRC/sgelss.f Fri May 25 16:03:41 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -86,10 +86,9 @@ + * LWORK >= 3*min(M,N) + max( 2*min(M,N), max(M,N), NRHS ) + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * INFO (output) INTEGER + * = 0: successful exit +@@ -156,7 +155,7 @@ + * following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 0 + MM = M + IF( M.GE.N .AND. M.GE.MNTHR ) THEN +@@ -229,20 +228,18 @@ + END IF + MAXWRK = MAX( MINWRK, MAXWRK ) + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) ++ $ INFO = -12 + END IF + * +- MINWRK = MAX( MINWRK, 1 ) +- IF( LWORK.LT.MINWRK .AND. 
.NOT.LQUERY ) +- $ INFO = -12 ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGELSS', -INFO ) + RETURN + ELSE IF( LQUERY ) THEN + RETURN + END IF +-* +-* Quick return if possible +-* + IF( M.EQ.0 .OR. N.EQ.0 ) THEN + RANK = 0 + RETURN +@@ -491,8 +488,8 @@ + DO 40 I = 1, NRHS, CHUNK + BL = MIN( NRHS-I+1, CHUNK ) + CALL SGEMM( 'T', 'N', M, BL, M, ONE, WORK( IL ), LDWORK, +- $ B( 1, I ), LDB, ZERO, WORK( IWORK ), N ) +- CALL SLACPY( 'G', M, BL, WORK( IWORK ), N, B( 1, I ), ++ $ B( 1, I ), LDB, ZERO, WORK( IWORK ), M ) ++ CALL SLACPY( 'G', M, BL, WORK( IWORK ), M, B( 1, I ), + $ LDB ) + 40 CONTINUE + ELSE +diff -uNr LAPACK.orig/SRC/sgesdd.f LAPACK/SRC/sgesdd.f +--- LAPACK.orig/SRC/sgesdd.f Thu Nov 11 20:32:10 1999 ++++ LAPACK/SRC/sgesdd.f Fri May 25 16:07:52 2001 +@@ -1,10 +1,11 @@ +- SUBROUTINE SGESDD( JOBZ, M, N, A, LDA, S, U, LDU, VT, LDVT, WORK, +- $ LWORK, IWORK, INFO ) ++ SUBROUTINE SGESDD( JOBZ, M, N, A, LDA, S, U, LDU, VT, LDVT, ++ $ WORK, LWORK, IWORK, INFO ) + * + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBZ +@@ -116,16 +117,20 @@ + * LWORK (input) INTEGER + * The dimension of the array WORK. LWORK >= 1. + * If JOBZ = 'N', +-* LWORK >= 3*min(M,N) + max(max(M,N),6*min(M,N)). ++* LWORK >= max(14*min(M,N)+4, 10*min(M,N)+2+ ++* SMLSIZ*(SMLSIZ+8)) + max(M,N) ++* where SMLSIZ is returned by ILAENV and is equal to the ++* maximum size of the subproblems at the bottom of the ++* computation tree (usually about 25). + * If JOBZ = 'O', +-* LWORK >= 3*min(M,N)*min(M,N) + +-* max(max(M,N),5*min(M,N)*min(M,N)+4*min(M,N)). ++* LWORK >= 5*min(M,N)*min(M,N) + max(M,N) + 9*min(M,N). + * If JOBZ = 'S' or 'A' +-* LWORK >= 3*min(M,N)*min(M,N) + +-* max(max(M,N),4*min(M,N)*min(M,N)+4*min(M,N)). 
++* LWORK >= 4*min(M,N)*min(M,N) + max(M,N) + 9*min(M,N). + * For good performance, LWORK should generally be larger. +-* If LWORK < 0 but other input arguments are legal, WORK(1) +-* returns the optimal LWORK. ++* ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * IWORK (workspace) INTEGER array, dimension (8*min(M,N)) + * +@@ -144,15 +149,17 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE +- PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) ++ PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, WNTQA, WNTQAS, WNTQN, WNTQO, WNTQS +- INTEGER BDSPAC, BLK, CHUNK, I, IE, IERR, IL, ++ LOGICAL WNTQA, WNTQAS, WNTQN, WNTQO, WNTQS ++ INTEGER BDSPAC, BDSPAN, BLK, CHUNK, I, IE, IERR, IL, + $ IR, ISCL, ITAU, ITAUP, ITAUQ, IU, IVT, LDWKVT, + $ LDWRKL, LDWRKR, LDWRKU, MAXWRK, MINMN, MINWRK, +- $ MNTHR, NWORK, WRKBL ++ $ MNTHR, NWORK, SMLSIZ, WRKBL + REAL ANRM, BIGNUM, EPS, SMLNUM + * .. + * .. Local Arrays .. +@@ -168,10 +175,10 @@ + LOGICAL LSAME + INTEGER ILAENV + REAL SLAMCH, SLANGE +- EXTERNAL ILAENV, LSAME, SLAMCH, SLANGE ++ EXTERNAL SLAMCH, SLANGE, ILAENV, LSAME + * .. + * .. Intrinsic Functions .. +- INTRINSIC INT, MAX, MIN, REAL, SQRT ++ INTRINSIC REAL, INT, MAX, MIN, SQRT + * .. + * .. Executable Statements .. + * +@@ -179,7 +186,7 @@ + * + INFO = 0 + MINMN = MIN( M, N ) +- MNTHR = INT( MINMN*11.0E0 / 6.0E0 ) ++ MNTHR = INT( MINMN*11.0 / 6.0 ) + WNTQA = LSAME( JOBZ, 'A' ) + WNTQS = LSAME( JOBZ, 'S' ) + WNTQAS = WNTQA .OR. WNTQS +@@ -187,7 +194,6 @@ + WNTQN = LSAME( JOBZ, 'N' ) + MINWRK = 1 + MAXWRK = 1 +- LQUERY = ( LWORK.EQ.-1 ) + * + IF( .NOT.( WNTQA .OR. WNTQS .OR. WNTQO .OR. 
WNTQN ) ) THEN + INFO = -1 +@@ -206,6 +212,8 @@ + INFO = -10 + END IF + * ++ SMLSIZ = ILAENV( 9, 'SGESDD', ' ', 0, 0, 0, 0 ) ++* + * Compute workspace + * (Note: Comments in the code beginning "Workspace:" describe the + * minimal amount of workspace needed at that point in the code, +@@ -218,22 +226,19 @@ + * + * Compute space needed for SBDSDC + * +- IF( WNTQN ) THEN +- BDSPAC = 7*N +- ELSE +- BDSPAC = 3*N*N + 4*N +- END IF ++ BDSPAC = 3*N*N + 7*N ++ BDSPAN = MAX( 12*N+4, 8*N+2+SMLSIZ*( SMLSIZ+8 ) ) + IF( M.GE.MNTHR ) THEN + IF( WNTQN ) THEN + * + * Path 1 (M much larger than N, JOBZ='N') + * +- WRKBL = N + N*ILAENV( 1, 'SGEQRF', ' ', M, N, -1, +- $ -1 ) +- WRKBL = MAX( WRKBL, 3*N+2*N* +- $ ILAENV( 1, 'SGEBRD', ' ', N, N, -1, -1 ) ) +- MAXWRK = MAX( WRKBL, BDSPAC+N ) +- MINWRK = BDSPAC + N ++ MAXWRK = N + N*ILAENV( 1, 'SGEQRF', ' ', M, N, -1, ++ $ -1 ) ++ MAXWRK = MAX( MAXWRK, 3*N+2*N* ++ $ ILAENV( 1, 'SGEBRD', ' ', N, N, -1, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC ) ++ MINWRK = BDSPAC + ELSE IF( WNTQO ) THEN + * + * Path 2 (M much larger than N, JOBZ='O') +@@ -247,9 +252,9 @@ + $ ILAENV( 1, 'SORMBR', 'QLN', N, N, N, -1 ) ) + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'SORMBR', 'PRT', N, N, N, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*N ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*N ) + MAXWRK = WRKBL + 2*N*N +- MINWRK = BDSPAC + 2*N*N + 3*N ++ MINWRK = BDSPAC + 2*N*N + 2*N + ELSE IF( WNTQS ) THEN + * + * Path 3 (M much larger than N, JOBZ='S') +@@ -263,9 +268,9 @@ + $ ILAENV( 1, 'SORMBR', 'QLN', N, N, N, -1 ) ) + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'SORMBR', 'PRT', N, N, N, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*N ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*N ) + MAXWRK = WRKBL + N*N +- MINWRK = BDSPAC + N*N + 3*N ++ MINWRK = BDSPAC + N*N + 2*N + ELSE IF( WNTQA ) THEN + * + * Path 4 (M much larger than N, JOBZ='A') +@@ -279,9 +284,9 @@ + $ ILAENV( 1, 'SORMBR', 'QLN', N, N, N, -1 ) ) + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'SORMBR', 'PRT', N, N, N, -1 ) ) +- WRKBL = MAX( WRKBL, 
BDSPAC+3*N ) +- MAXWRK = WRKBL + N*N +- MINWRK = BDSPAC + N*N + 3*N ++ WRKBL = MAX( WRKBL, BDSPAC+2*N ) ++ MAXWRK = N*N + WRKBL ++ MINWRK = BDSPAC + N*N + M + N + END IF + ELSE + * +@@ -289,53 +294,47 @@ + * + WRKBL = 3*N + ( M+N )*ILAENV( 1, 'SGEBRD', ' ', M, N, -1, + $ -1 ) +- IF( WNTQN ) THEN +- MAXWRK = MAX( WRKBL, BDSPAC+3*N ) +- MINWRK = 3*N + MAX( M, BDSPAC ) +- ELSE IF( WNTQO ) THEN ++ IF( WNTQO ) THEN + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'SORMBR', 'QLN', M, N, N, -1 ) ) + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'SORMBR', 'PRT', N, N, N, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*N ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*N+M ) + MAXWRK = WRKBL + M*N +- MINWRK = 3*N + MAX( M, N*N+BDSPAC ) ++ MINWRK = BDSPAC + N*N + 2*N + M + ELSE IF( WNTQS ) THEN +- WRKBL = MAX( WRKBL, 3*N+N* +- $ ILAENV( 1, 'SORMBR', 'QLN', M, N, N, -1 ) ) +- WRKBL = MAX( WRKBL, 3*N+N* +- $ ILAENV( 1, 'SORMBR', 'PRT', N, N, N, -1 ) ) +- MAXWRK = MAX( WRKBL, BDSPAC+3*N ) +- MINWRK = 3*N + MAX( M, BDSPAC ) ++ MAXWRK = MAX( MAXWRK, 3*N+N* ++ $ ILAENV( 1, 'SORMBR', 'QLN', M, N, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, 3*N+N* ++ $ ILAENV( 1, 'SORMBR', 'PRT', N, N, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC+2*N+M ) ++ MINWRK = BDSPAC + 2*N + M + ELSE IF( WNTQA ) THEN +- WRKBL = MAX( WRKBL, 3*N+M* +- $ ILAENV( 1, 'SORMBR', 'QLN', M, M, N, -1 ) ) +- WRKBL = MAX( WRKBL, 3*N+N* +- $ ILAENV( 1, 'SORMBR', 'PRT', N, N, N, -1 ) ) +- MAXWRK = MAX( MAXWRK, BDSPAC+3*N ) +- MINWRK = 3*N + MAX( M, BDSPAC ) ++ MAXWRK = MAX( MAXWRK, 3*N+M* ++ $ ILAENV( 1, 'SORMBR', 'QLN', M, M, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, 3*N+N* ++ $ ILAENV( 1, 'SORMBR', 'PRT', N, N, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC+2*N+M ) ++ MINWRK = BDSPAC + 2*N + M + END IF + END IF + ELSE + * + * Compute space needed for SBDSDC + * +- IF( WNTQN ) THEN +- BDSPAC = 7*M +- ELSE +- BDSPAC = 3*M*M + 4*M +- END IF ++ BDSPAC = 3*M*M + 7*M ++ BDSPAN = MAX( 12*M+4, 8*M+2+SMLSIZ*( SMLSIZ+8 ) ) + IF( N.GE.MNTHR ) THEN + IF( WNTQN ) THEN + * + * Path 1t (N 
much larger than M, JOBZ='N') + * +- WRKBL = M + M*ILAENV( 1, 'SGELQF', ' ', M, N, -1, +- $ -1 ) +- WRKBL = MAX( WRKBL, 3*M+2*M* +- $ ILAENV( 1, 'SGEBRD', ' ', M, M, -1, -1 ) ) +- MAXWRK = MAX( WRKBL, BDSPAC+M ) +- MINWRK = BDSPAC + M ++ MAXWRK = M + M*ILAENV( 1, 'SGELQF', ' ', M, N, -1, ++ $ -1 ) ++ MAXWRK = MAX( MAXWRK, 3*M+2*M* ++ $ ILAENV( 1, 'SGEBRD', ' ', M, M, -1, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC ) ++ MINWRK = BDSPAC + ELSE IF( WNTQO ) THEN + * + * Path 2t (N much larger than M, JOBZ='O') +@@ -349,9 +348,9 @@ + $ ILAENV( 1, 'SORMBR', 'QLN', M, M, M, -1 ) ) + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'SORMBR', 'PRT', M, M, M, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*M ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*M ) + MAXWRK = WRKBL + 2*M*M +- MINWRK = BDSPAC + 2*M*M + 3*M ++ MINWRK = BDSPAC + 2*M*M + 2*M + ELSE IF( WNTQS ) THEN + * + * Path 3t (N much larger than M, JOBZ='S') +@@ -365,9 +364,9 @@ + $ ILAENV( 1, 'SORMBR', 'QLN', M, M, M, -1 ) ) + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'SORMBR', 'PRT', M, M, M, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*M ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*M ) + MAXWRK = WRKBL + M*M +- MINWRK = BDSPAC + M*M + 3*M ++ MINWRK = BDSPAC + M*M + 2*M + ELSE IF( WNTQA ) THEN + * + * Path 4t (N much larger than M, JOBZ='A') +@@ -381,9 +380,9 @@ + $ ILAENV( 1, 'SORMBR', 'QLN', M, M, M, -1 ) ) + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'SORMBR', 'PRT', M, M, M, -1 ) ) +- WRKBL = MAX( WRKBL, BDSPAC+3*M ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*M ) + MAXWRK = WRKBL + M*M +- MINWRK = BDSPAC + M*M + 3*M ++ MINWRK = BDSPAC + M*M + M + N + END IF + ELSE + * +@@ -391,52 +390,46 @@ + * + WRKBL = 3*M + ( M+N )*ILAENV( 1, 'SGEBRD', ' ', M, N, -1, + $ -1 ) +- IF( WNTQN ) THEN +- MAXWRK = MAX( WRKBL, BDSPAC+3*M ) +- MINWRK = 3*M + MAX( N, BDSPAC ) +- ELSE IF( WNTQO ) THEN ++ IF( WNTQO ) THEN + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'SORMBR', 'QLN', M, M, N, -1 ) ) + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'SORMBR', 'PRT', M, N, M, -1 ) ) +- WRKBL = MAX( WRKBL, 
BDSPAC+3*M ) ++ WRKBL = MAX( WRKBL, BDSPAC+2*M ) + MAXWRK = WRKBL + M*N +- MINWRK = 3*M + MAX( N, M*M+BDSPAC ) ++ MINWRK = BDSPAC + M*M + 2*M + N + ELSE IF( WNTQS ) THEN +- WRKBL = MAX( WRKBL, 3*M+M* +- $ ILAENV( 1, 'SORMBR', 'QLN', M, M, N, -1 ) ) +- WRKBL = MAX( WRKBL, 3*M+M* +- $ ILAENV( 1, 'SORMBR', 'PRT', M, N, M, -1 ) ) +- MAXWRK = MAX( WRKBL, BDSPAC+3*M ) +- MINWRK = 3*M + MAX( N, BDSPAC ) ++ MAXWRK = MAX( MAXWRK, 3*M+M* ++ $ ILAENV( 1, 'SORMBR', 'QLN', M, M, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, 3*M+M* ++ $ ILAENV( 1, 'SORMBR', 'PRT', M, N, M, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC+2*M ) ++ MINWRK = BDSPAC + 2*M + N + ELSE IF( WNTQA ) THEN +- WRKBL = MAX( WRKBL, 3*M+M* +- $ ILAENV( 1, 'SORMBR', 'QLN', M, M, N, -1 ) ) +- WRKBL = MAX( WRKBL, 3*M+M* +- $ ILAENV( 1, 'SORMBR', 'PRT', N, N, M, -1 ) ) +- MAXWRK = MAX( WRKBL, BDSPAC+3*M ) +- MINWRK = 3*M + MAX( N, BDSPAC ) ++ MAXWRK = MAX( MAXWRK, 3*M+M* ++ $ ILAENV( 1, 'SORMBR', 'QLN', M, M, N, -1 ) ) ++ MAXWRK = MAX( MAXWRK, 3*M+N* ++ $ ILAENV( 1, 'SORMBR', 'PRT', N, N, M, -1 ) ) ++ MAXWRK = MAX( MAXWRK, BDSPAC+2*M ) ++ MINWRK = BDSPAC + 2*M + N + END IF + END IF + END IF ++ END IF ++ IF( INFO.EQ.0 ) THEN + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -12 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -12 +- END IF ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGESDD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( M.EQ.0 .OR. 
N.EQ.0 ) THEN +- IF( LWORK.GE.1 ) +- $ WORK( 1 ) = ONE + RETURN + END IF + * +@@ -497,7 +490,7 @@ + NWORK = IE + N + * + * Perform bidiagonal SVD, computing singular values only +-* (Workspace: need N+BDSPAC) ++* (Workspace: need BDSPAN) + * + CALL SBDSDC( 'U', 'N', N, S, WORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, WORK( NWORK ), IWORK, INFO ) +@@ -512,10 +505,10 @@ + * + * WORK(IR) is LDWRKR by N + * +- IF( LWORK.GE.LDA*N+N*N+3*N+BDSPAC ) THEN ++ IF( LWORK.GE.LDA*N+4*N*N+9*N ) THEN + LDWRKR = LDA + ELSE +- LDWRKR = ( LWORK-N*N-3*N-BDSPAC ) / N ++ LDWRKR = ( LWORK-4*N*N-9*N ) / N + END IF + ITAU = IR + LDWRKR*N + NWORK = ITAU + N +@@ -557,7 +550,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in WORK(IU) and computing right + * singular vectors of bidiagonal matrix in VT +-* (Workspace: need N+N*N+BDSPAC) ++* (Workspace: need 2*N*N+BDSPAC) + * + CALL SBDSDC( 'U', 'I', N, S, WORK( IE ), WORK( IU ), N, + $ VT, LDVT, DUM, IDUM, WORK( NWORK ), IWORK, +@@ -633,7 +626,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagoal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need N+BDSPAC) ++* (Workspace: need N*N+BDSPAC) + * + CALL SBDSDC( 'U', 'I', N, S, WORK( IE ), U, LDU, VT, + $ LDVT, DUM, IDUM, WORK( NWORK ), IWORK, +@@ -681,7 +674,7 @@ + CALL SLACPY( 'L', M, N, A, LDA, U, LDU ) + * + * Generate Q in U +-* (Workspace: need N*N+2*N, prefer N*N+N+N*NB) ++* (Workspace: need N*N+N+M, prefer N*N+N+M*NB) + CALL SORGQR( M, M, N, U, LDU, WORK( ITAU ), + $ WORK( NWORK ), LWORK-NWORK+1, IERR ) + * +@@ -703,7 +696,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in WORK(IU) and computing right + * singular vectors of bidiagonal matrix in VT +-* (Workspace: need N+N*N+BDSPAC) ++* (Workspace: need N*N+BDSPAC) + * + CALL SBDSDC( 'U', 'I', N, S, WORK( IE ), WORK( IU ), N, + $ VT, LDVT, DUM, IDUM, WORK( NWORK ), IWORK, +@@ -754,13 
+747,13 @@ + IF( WNTQN ) THEN + * + * Perform bidiagonal SVD, only computing singular values +-* (Workspace: need N+BDSPAC) ++* (Workspace: need BDSPAN) + * + CALL SBDSDC( 'U', 'N', N, S, WORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, WORK( NWORK ), IWORK, INFO ) + ELSE IF( WNTQO ) THEN + IU = NWORK +- IF( LWORK.GE.M*N+3*N+BDSPAC ) THEN ++ IF( LWORK.GE.M*N+3*N*N+9*N ) THEN + * + * WORK( IU ) is M by N + * +@@ -785,7 +778,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in WORK(IU) and computing right + * singular vectors of bidiagonal matrix in VT +-* (Workspace: need N+N*N+BDSPAC) ++* (Workspace: need N*N+BDSPAC) + * + CALL SBDSDC( 'U', 'I', N, S, WORK( IE ), WORK( IU ), + $ LDWRKU, VT, LDVT, DUM, IDUM, WORK( NWORK ), +@@ -798,7 +791,7 @@ + $ WORK( ITAUP ), VT, LDVT, WORK( NWORK ), + $ LWORK-NWORK+1, IERR ) + * +- IF( LWORK.GE.M*N+3*N+BDSPAC ) THEN ++ IF( LWORK.GE.M*N+3*N*N+9*N ) THEN + * + * Overwrite WORK(IU) by left singular vectors of A + * (Workspace: need N*N+2*N, prefer N*N+N+N*NB) +@@ -838,7 +831,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need N+BDSPAC) ++* (Workspace: need BDSPAC) + * + CALL SLASET( 'F', M, N, ZERO, ZERO, U, LDU ) + CALL SBDSDC( 'U', 'I', N, S, WORK( IE ), U, LDU, VT, +@@ -855,12 +848,12 @@ + CALL SORMBR( 'P', 'R', 'T', N, N, N, A, LDA, + $ WORK( ITAUP ), VT, LDVT, WORK( NWORK ), + $ LWORK-NWORK+1, IERR ) +- ELSE IF( WNTQA ) THEN ++ ELSE + * + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need N+BDSPAC) ++* (Workspace: need BDSPAC) + * + CALL SLASET( 'F', M, M, ZERO, ZERO, U, LDU ) + CALL SBDSDC( 'U', 'I', N, S, WORK( IE ), U, LDU, VT, +@@ -925,7 +918,7 @@ + NWORK = IE + M + * + * Perform bidiagonal SVD, computing singular values only +-* 
(Workspace: need M+BDSPAC) ++* (Workspace: need BDSPAN) + * + CALL SBDSDC( 'U', 'N', M, S, WORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, WORK( NWORK ), IWORK, INFO ) +@@ -941,7 +934,7 @@ + * IVT is M by M + * + IL = IVT + M*M +- IF( LWORK.GE.M*N+M*M+3*M+BDSPAC ) THEN ++ IF( LWORK.GE.M*N+4*M*M+9*M ) THEN + * + * WORK(IL) is M by N + * +@@ -986,7 +979,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U, and computing right singular + * vectors of bidiagonal matrix in WORK(IVT) +-* (Workspace: need M+M*M+BDSPAC) ++* (Workspace: need 2*M*M+BDSPAC) + * + CALL SBDSDC( 'U', 'I', M, S, WORK( IE ), U, LDU, + $ WORK( IVT ), M, DUM, IDUM, WORK( NWORK ), +@@ -1061,7 +1054,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need M+BDSPAC) ++* (Workspace: need M*M+BDSPAC) + * + CALL SBDSDC( 'U', 'I', M, S, WORK( IE ), U, LDU, VT, + $ LDVT, DUM, IDUM, WORK( NWORK ), IWORK, +@@ -1108,7 +1101,7 @@ + CALL SLACPY( 'U', M, N, A, LDA, VT, LDVT ) + * + * Generate Q in VT +-* (Workspace: need M*M+2*M, prefer M*M+M+M*NB) ++* (Workspace: need M*M+M+N, prefer M*M+M+N*NB) + * + CALL SORGLQ( N, N, M, VT, LDVT, WORK( ITAU ), + $ WORK( NWORK ), LWORK-NWORK+1, IERR ) +@@ -1131,7 +1124,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in WORK(IVT) +-* (Workspace: need M+M*M+BDSPAC) ++* (Workspace: need M*M+BDSPAC) + * + CALL SBDSDC( 'U', 'I', M, S, WORK( IE ), U, LDU, + $ WORK( IVT ), LDWKVT, DUM, IDUM, +@@ -1182,14 +1175,14 @@ + IF( WNTQN ) THEN + * + * Perform bidiagonal SVD, only computing singular values +-* (Workspace: need M+BDSPAC) ++* (Workspace: need BDSPAN) + * + CALL SBDSDC( 'L', 'N', M, S, WORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, WORK( NWORK ), IWORK, INFO ) + ELSE IF( WNTQO ) THEN + LDWKVT = M + IVT = NWORK +- 
IF( LWORK.GE.M*N+3*M+BDSPAC ) THEN ++ IF( LWORK.GE.M*N+3*M*M+9*M ) THEN + * + * WORK( IVT ) is M by N + * +@@ -1224,7 +1217,7 @@ + $ WORK( ITAUQ ), U, LDU, WORK( NWORK ), + $ LWORK-NWORK+1, IERR ) + * +- IF( LWORK.GE.M*N+3*M+BDSPAC ) THEN ++ IF( LWORK.GE.M*N+3*M*M+9*M ) THEN + * + * Overwrite WORK(IVT) by left singular vectors of A + * (Workspace: need M*M+2*M, prefer M*M+M+M*NB) +@@ -1263,7 +1256,7 @@ + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need M+BDSPAC) ++* (Workspace: need BDSPAC) + * + CALL SLASET( 'F', M, N, ZERO, ZERO, VT, LDVT ) + CALL SBDSDC( 'L', 'I', M, S, WORK( IE ), U, LDU, VT, +@@ -1280,12 +1273,12 @@ + CALL SORMBR( 'P', 'R', 'T', M, N, M, A, LDA, + $ WORK( ITAUP ), VT, LDVT, WORK( NWORK ), + $ LWORK-NWORK+1, IERR ) +- ELSE IF( WNTQA ) THEN ++ ELSE + * + * Perform bidiagonal SVD, computing left singular vectors + * of bidiagonal matrix in U and computing right singular + * vectors of bidiagonal matrix in VT +-* (Workspace: need M+BDSPAC) ++* (Workspace: need BDSPAC) + * + CALL SLASET( 'F', N, N, ZERO, ZERO, VT, LDVT ) + CALL SBDSDC( 'L', 'I', M, S, WORK( IE ), U, LDU, VT, +@@ -1319,9 +1312,15 @@ + IF( ANRM.GT.BIGNUM ) + $ CALL SLASCL( 'G', 0, 0, BIGNUM, ANRM, MINMN, 1, S, MINMN, + $ IERR ) ++ IF( INFO.NE.0 .AND. ANRM.GT.BIGNUM ) ++ $ CALL SLASCL( 'G', 0, 0, BIGNUM, ANRM, MINMN-1, 1, WORK( 2 ), ++ $ MINMN, IERR ) + IF( ANRM.LT.SMLNUM ) + $ CALL SLASCL( 'G', 0, 0, SMLNUM, ANRM, MINMN, 1, S, MINMN, + $ IERR ) ++ IF( INFO.NE.0 .AND. ANRM.LT.SMLNUM ) ++ $ CALL SLASCL( 'G', 0, 0, SMLNUM, ANRM, MINMN-1, 1, WORK( 2 ), ++ $ MINMN, IERR ) + END IF + * + * Return optimal workspace in WORK(1) +diff -uNr LAPACK.orig/SRC/sgesvd.f LAPACK/SRC/sgesvd.f +--- LAPACK.orig/SRC/sgesvd.f Thu Nov 4 14:23:35 1999 ++++ LAPACK/SRC/sgesvd.f Fri May 25 16:08:20 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. 
of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBU, JOBVT +@@ -118,10 +119,9 @@ + * LWORK >= MAX(3*MIN(M,N)+MAX(M,N),5*MIN(M,N)). + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * INFO (output) INTEGER + * = 0: successful exit. +@@ -134,12 +134,14 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, WNTUA, WNTUAS, WNTUN, WNTUO, WNTUS, +- $ WNTVA, WNTVAS, WNTVN, WNTVO, WNTVS ++ LOGICAL WNTUA, WNTUAS, WNTUN, WNTUO, WNTUS, WNTVA, ++ $ WNTVAS, WNTVN, WNTVO, WNTVS + INTEGER BDSPAC, BLK, CHUNK, I, IE, IERR, IR, ISCL, + $ ITAU, ITAUP, ITAUQ, IU, IWORK, LDWRKR, LDWRKU, + $ MAXWRK, MINMN, MINWRK, MNTHR, NCU, NCVT, NRU, +@@ -181,7 +183,7 @@ + WNTVO = LSAME( JOBVT, 'O' ) + WNTVN = LSAME( JOBVT, 'N' ) + MINWRK = 1 +- LQUERY = ( LWORK.EQ.-1 ) ++ MAXWRK = 1 + * + IF( .NOT.( WNTUA .OR. WNTUS .OR. WNTUO .OR. WNTUN ) ) THEN + INFO = -1 +@@ -208,8 +210,7 @@ + * NB refers to the optimal block size for the immediately + * following subroutine, as returned by ILAENV.) + * +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) .AND. M.GT.0 .AND. +- $ N.GT.0 ) THEN ++ IF( INFO.EQ.0 .AND. M.GT.0 .AND. 
N.GT.0 ) THEN + IF( M.GE.N ) THEN + * + * Compute space needed for SBDSQR +@@ -557,24 +558,21 @@ + MAXWRK = MAX( MAXWRK, MINWRK ) + END IF + END IF ++ END IF ++ IF( INFO.EQ.0 ) THEN + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -13 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -13 +- END IF ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGESVD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( M.EQ.0 .OR. N.EQ.0 ) THEN +- IF( LWORK.GE.1 ) +- $ WORK( 1 ) = ONE + RETURN + END IF + * +diff -uNr LAPACK.orig/SRC/sggbak.f LAPACK/SRC/sggbak.f +--- LAPACK.orig/SRC/sggbak.f Thu Nov 4 14:23:36 1999 ++++ LAPACK/SRC/sggbak.f Fri May 25 16:08:51 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* February 1, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOB, SIDE +@@ -108,10 +108,15 @@ + INFO = -3 + ELSE IF( ILO.LT.1 ) THEN + INFO = -4 +- ELSE IF( IHI.LT.ILO .OR. IHI.GT.MAX( 1, N ) ) THEN ++ ELSE IF( N.EQ.0 .AND. IHI.EQ.0 .AND. ILO.NE.1 ) THEN ++ INFO = -4 ++ ELSE IF( N.GT.0 .AND. ( IHI.LT.ILO .OR. IHI.GT.MAX( 1, N ) ) ) ++ $ THEN ++ INFO = -5 ++ ELSE IF( N.EQ.0 .AND. ILO.EQ.1 .AND. IHI.NE.0 ) THEN + INFO = -5 + ELSE IF( M.LT.0 ) THEN +- INFO = -6 ++ INFO = -8 + ELSE IF( LDV.LT.MAX( 1, N ) ) THEN + INFO = -10 + END IF +diff -uNr LAPACK.orig/SRC/sggbal.f LAPACK/SRC/sggbal.f +--- LAPACK.orig/SRC/sggbal.f Thu Nov 4 14:25:42 1999 ++++ LAPACK/SRC/sggbal.f Fri May 25 16:09:11 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 12, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER JOB +@@ -141,7 +141,7 @@ + ELSE IF( LDA.LT.MAX( 1, N ) ) THEN + INFO = -4 + ELSE IF( LDB.LT.MAX( 1, N ) ) THEN +- INFO = -5 ++ INFO = -6 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGGBAL', -INFO ) +@@ -188,8 +188,8 @@ + IF( L.NE.1 ) + $ GO TO 30 + * +- RSCALE( 1 ) = 1 +- LSCALE( 1 ) = 1 ++ RSCALE( 1 ) = ONE ++ LSCALE( 1 ) = ONE + GO TO 190 + * + 30 CONTINUE +@@ -247,7 +247,7 @@ + * Permute rows M and I + * + 160 CONTINUE +- LSCALE( M ) = I ++ LSCALE( M ) = REAL( I ) + IF( I.EQ.M ) + $ GO TO 170 + CALL SSWAP( N-K+1, A( I, K ), LDA, A( M, K ), LDA ) +@@ -256,7 +256,7 @@ + * Permute columns M and J + * + 170 CONTINUE +- RSCALE( M ) = J ++ RSCALE( M ) = REAL( J ) + IF( J.EQ.M ) + $ GO TO 180 + CALL SSWAP( L, A( 1, J ), 1, A( 1, M ), 1 ) +@@ -424,7 +424,7 @@ + DO 360 I = ILO, IHI + IRAB = ISAMAX( N-ILO+1, A( I, ILO ), LDA ) + RAB = ABS( A( I, IRAB+ILO-1 ) ) +- IRAB = ISAMAX( N-ILO+1, B( I, ILO ), LDA ) ++ IRAB = ISAMAX( N-ILO+1, B( I, ILO ), LDB ) + RAB = MAX( RAB, ABS( B( I, IRAB+ILO-1 ) ) ) + LRAB = INT( LOG10( RAB+SFMIN ) / BASL+ONE ) + IR = LSCALE( I ) + SIGN( HALF, LSCALE( I ) ) +diff -uNr LAPACK.orig/SRC/sgges.f LAPACK/SRC/sgges.f +--- LAPACK.orig/SRC/sgges.f Thu Nov 4 14:26:20 1999 ++++ LAPACK/SRC/sgges.f Fri May 25 16:09:33 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR, SORT +@@ -158,10 +159,9 @@ + * LWORK (input) INTEGER + * The dimension of the array WORK. LWORK >= 8*N+16. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. 
The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * BWORK (workspace) LOGICAL array, dimension (N) + * Not referenced if SORT = 'N'. +@@ -184,12 +184,14 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) + * .. + * .. Local Scalars .. + LOGICAL CURSL, ILASCL, ILBSCL, ILVSL, ILVSR, LASTSL, +- $ LQUERY, LST2SL, WANTST ++ $ LST2SL, WANTST + INTEGER I, ICOLS, IERR, IHI, IJOBVL, IJOBVR, ILEFT, + $ ILO, IP, IRIGHT, IROWS, ITAU, IWRK, MAXWRK, + $ MINWRK +@@ -245,7 +247,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( IJOBVL.LE.0 ) THEN + INFO = -1 + ELSE IF( IJOBVR.LE.0 ) THEN +@@ -272,7 +273,7 @@ + * following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MINWRK = 7*( N+1 ) + 16 + MAXWRK = 7*( N+1 ) + N*ILAENV( 1, 'SGEQRF', ' ', N, 1, N, 0 ) + + $ 16 +@@ -281,19 +282,17 @@ + $ ILAENV( 1, 'SORGQR', ' ', N, 1, N, -1 ) ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -19 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) +- $ INFO = -19 ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGGES ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/sggesx.f LAPACK/SRC/sggesx.f +--- LAPACK.orig/SRC/sggesx.f Thu Nov 4 14:26:20 1999 ++++ LAPACK/SRC/sggesx.f Fri May 25 16:09:52 2001 +@@ -7,6 +7,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Do WS calculations if LWORK = -1 (eca) + * + * .. 
Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR, SENSE, SORT +@@ -185,6 +186,10 @@ + * If SENSE = 'E', 'V', or 'B', + * LWORK >= MAX( 8*(N+1)+16, 2*SDIM*(N-SDIM) ). + * ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. ++* + * IWORK (workspace) INTEGER array, dimension (LIWORK) + * Not referenced if SENSE = 'N'. + * +@@ -227,6 +232,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) + * .. +@@ -330,7 +337,7 @@ + * following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. LWORK.GE.1 ) THEN ++ IF( INFO.EQ.0 ) THEN + MINWRK = 8*( N+1 ) + 16 + MAXWRK = 7*( N+1 ) + N*ILAENV( 1, 'SGEQRF', ' ', N, 1, N, 0 ) + + $ 16 +@@ -338,7 +345,15 @@ + MAXWRK = MAX( MAXWRK, 8*( N+1 )+N* + $ ILAENV( 1, 'SORGQR', ' ', N, 1, N, -1 )+16 ) + END IF ++* ++* Estimate the workspace needed by STGSEN. ++* ++ IF( WANTST ) THEN ++ MAXWRK = MAX( MAXWRK, 2*N+(N*N+1)/2 ) ++ END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -22 + END IF + IF( .NOT.WANTSN ) THEN + LIWMIN = 1 +@@ -346,21 +361,18 @@ + LIWMIN = N + 6 + END IF + IWORK( 1 ) = LIWMIN +-* +- IF( INFO.EQ.0 .AND. LWORK.LT.MINWRK ) THEN +- INFO = -22 +- ELSE IF( INFO.EQ.0 .AND. IJOB.GE.1 ) THEN ++ IF( INFO.EQ.0 .AND. IJOB.GE.1 ) THEN + IF( LIWORK.LT.LIWMIN ) + $ INFO = -24 + END IF + * ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGGESX', -INFO ) + RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/sggev.f LAPACK/SRC/sggev.f +--- LAPACK.orig/SRC/sggev.f Thu Nov 4 14:26:20 1999 ++++ LAPACK/SRC/sggev.f Fri May 25 16:10:10 2001 +@@ -5,6 +5,7 @@ + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -123,10 +124,9 @@ + * The dimension of the array WORK. LWORK >= max(1,8*N). + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * INFO (output) INTEGER + * = 0: successful exit +@@ -141,11 +141,13 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) + * .. + * .. Local Scalars .. +- LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, LQUERY ++ LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR + CHARACTER CHTEMP + INTEGER ICOLS, IERR, IHI, IJOBVL, IJOBVR, ILEFT, ILO, + $ IN, IRIGHT, IROWS, ITAU, IWRK, JC, JR, MAXWRK, +@@ -200,7 +202,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( IJOBVL.LE.0 ) THEN + INFO = -1 + ELSE IF( IJOBVR.LE.0 ) THEN +@@ -226,24 +227,21 @@ + * computed assuming ILO = 1 and IHI = N, the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 7*N + N*ILAENV( 1, 'SGEQRF', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 8*N ) + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -16 + END IF + * +- IF( LWORK.LT.MINWRK .AND. 
.NOT.LQUERY ) +- $ INFO = -16 ++* Quick returns + * + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGGEV ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/sggevx.f LAPACK/SRC/sggevx.f +--- LAPACK.orig/SRC/sggevx.f Thu Nov 4 14:26:20 1999 ++++ LAPACK/SRC/sggevx.f Fri May 25 16:11:25 2001 +@@ -7,6 +7,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER BALANC, JOBVL, JOBVR, SENSE +@@ -212,10 +213,9 @@ + * If SENSE = 'E', LWORK >= 12*N. + * If SENSE = 'V' or 'B', LWORK >= 2*N*N+12*N+16. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * IWORK (workspace) INTEGER array, dimension (N+6) + * If SENSE = 'E', IWORK is not referenced. +@@ -262,12 +262,14 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + REAL ZERO, ONE + PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) + * .. + * .. Local Scalars .. 
+- LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, LQUERY, PAIR, +- $ WANTSB, WANTSE, WANTSN, WANTSV ++ LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, PAIR, WANTSB, ++ $ WANTSE, WANTSN, WANTSV + CHARACTER CHTEMP + INTEGER I, ICOLS, IERR, IJOBVL, IJOBVR, IN, IROWS, + $ ITAU, IWRK, IWRK1, J, JC, JR, M, MAXWRK, +@@ -327,7 +329,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( .NOT.( LSAME( BALANC, 'N' ) .OR. LSAME( BALANC, + $ 'S' ) .OR. LSAME( BALANC, 'P' ) .OR. LSAME( BALANC, 'B' ) ) ) + $ THEN +@@ -360,7 +361,7 @@ + * computed assuming ILO = 1 and IHI = N, the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 5*N + N*ILAENV( 1, 'SGEQRF', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 6*N ) + IF( WANTSE ) THEN +@@ -370,24 +371,19 @@ + MAXWRK = MAX( MAXWRK, 2*N*N+12*N+16 ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -26 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -26 +- END IF ++* Quick returns + * + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGGEVX', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) RETURN + IF( N.EQ.0 ) + $ RETURN +-* + * + * Get machine constants + * +diff -uNr LAPACK.orig/SRC/sgghrd.f LAPACK/SRC/sgghrd.f +--- LAPACK.orig/SRC/sgghrd.f Thu Nov 4 14:25:44 1999 ++++ LAPACK/SRC/sgghrd.f Fri May 25 16:11:45 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 26, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER COMPQ, COMPZ +@@ -20,16 +20,32 @@ + * + * SGGHRD reduces a pair of real matrices (A,B) to generalized upper + * Hessenberg form using orthogonal transformations, where A is a +-* general matrix and B is upper triangular: Q' * A * Z = H and +-* Q' * B * Z = T, where H is upper Hessenberg, T is upper triangular, +-* and Q and Z are orthogonal, and ' means transpose. ++* general matrix and B is upper triangular. The form of the ++* generalized eigenvalue problem is ++* A*x = lambda*B*x, ++* and B is typically made upper triangular by computing its QR ++* factorization and moving the orthogonal matrix Q to the left side ++* of the equation. ++* ++* This subroutine simultaneously reduces A to a Hessenberg matrix H: ++* Q**T*A*Z = H ++* and transforms B to another upper triangular matrix T: ++* Q**T*B*Z = T ++* in order to reduce the problem to its standard form ++* H*y = lambda*T*y ++* where y = Z**T*x. + * + * The orthogonal matrices Q and Z are determined as products of Givens + * rotations. They may either be formed explicitly, or they may be +-* postmultiplied into input matrices Q1 and Z1, so that ++* postmultiplied into input matrices Q1 and Z1, so that + * +-* Q1 * A * Z1' = (Q1*Q) * H * (Z1*Z)' +-* Q1 * B * Z1' = (Q1*Q) * T * (Z1*Z)' ++* Q1 * A * Z1**T = (Q1*Q) * H * (Z1*Z)**T ++* ++* Q1 * B * Z1**T = (Q1*Q) * T * (Z1*Z)**T ++* ++* If Q1 is the orthogonal matrix from the QR factorization of B in the ++* original equation A*x = lambda*B*x, then SGGHRD reduces the original ++* problem to generalized Hessenberg form. + * + * Arguments + * ========= +@@ -53,10 +69,11 @@ + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set +-* by a previous call to SGGBAL; otherwise they should be set +-* to 1 and N respectively. ++* ILO and IHI mark the rows and columns of A which are to be ++* reduced. 
It is assumed that A is already upper triangular ++* in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are ++* normally set by a previous call to SGGBAL; otherwise they ++* should be set to 1 and N respectively. + * 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. + * + * A (input/output) REAL array, dimension (LDA, N) +@@ -70,33 +87,28 @@ + * + * B (input/output) REAL array, dimension (LDB, N) + * On entry, the N-by-N upper triangular matrix B. +-* On exit, the upper triangular matrix T = Q' B Z. The ++* On exit, the upper triangular matrix T = Q**T B Z. The + * elements below the diagonal are set to zero. + * + * LDB (input) INTEGER + * The leading dimension of the array B. LDB >= max(1,N). + * + * Q (input/output) REAL array, dimension (LDQ, N) +-* If COMPQ='N': Q is not referenced. +-* If COMPQ='I': on entry, Q need not be set, and on exit it +-* contains the orthogonal matrix Q, where Q' +-* is the product of the Givens transformations +-* which are applied to A and B on the left. +-* If COMPQ='V': on entry, Q must contain an orthogonal matrix +-* Q1, and on exit this is overwritten by Q1*Q. ++* On entry, if COMPQ = 'V', the orthogonal matrix Q1, ++* typically from the QR factorization of B. ++* On exit, if COMPQ='I', the orthogonal matrix Q, and if ++* COMPQ = 'V', the product Q1*Q. ++* Not referenced if COMPQ='N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. + * LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. + * + * Z (input/output) REAL array, dimension (LDZ, N) +-* If COMPZ='N': Z is not referenced. +-* If COMPZ='I': on entry, Z need not be set, and on exit it +-* contains the orthogonal matrix Z, which is +-* the product of the Givens transformations +-* which are applied to A and B on the right. +-* If COMPZ='V': on entry, Z must contain an orthogonal matrix +-* Z1, and on exit this is overwritten by Z1*Z. ++* On entry, if COMPZ = 'V', the orthogonal matrix Z1. 
++* On exit, if COMPZ='I', the orthogonal matrix Z, and if ++* COMPZ = 'V', the product Z1*Z. ++* Not referenced if COMPZ='N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. +diff -uNr LAPACK.orig/SRC/shgeqz.f LAPACK/SRC/shgeqz.f +--- LAPACK.orig/SRC/shgeqz.f Thu Nov 4 14:23:36 1999 ++++ LAPACK/SRC/shgeqz.f Fri May 25 16:12:05 2001 +@@ -1,56 +1,75 @@ +- SUBROUTINE SHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, A, LDA, B, LDB, ++ SUBROUTINE SHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, H, LDH, T, LDT, + $ ALPHAR, ALPHAI, BETA, Q, LDQ, Z, LDZ, WORK, + $ LWORK, INFO ) + * + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ, JOB +- INTEGER IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, LWORK, N ++ INTEGER IHI, ILO, INFO, LDH, LDQ, LDT, LDZ, LWORK, N + * .. + * .. Array Arguments .. +- REAL A( LDA, * ), ALPHAI( * ), ALPHAR( * ), +- $ B( LDB, * ), BETA( * ), Q( LDQ, * ), WORK( * ), +- $ Z( LDZ, * ) ++ REAL ALPHAI( * ), ALPHAR( * ), BETA( * ), ++ $ H( LDH, * ), Q( LDQ, * ), T( LDT, * ), ++ $ WORK( * ), Z( LDZ, * ) + * .. + * + * Purpose + * ======= + * +-* SHGEQZ implements a single-/double-shift version of the QZ method for +-* finding the generalized eigenvalues +-* +-* w(j)=(ALPHAR(j) + i*ALPHAI(j))/BETAR(j) of the equation +-* +-* det( A - w(i) B ) = 0 +-* +-* In addition, the pair A,B may be reduced to generalized Schur form: +-* B is upper triangular, and A is block upper triangular, where the +-* diagonal blocks are either 1-by-1 or 2-by-2, the 2-by-2 blocks having +-* complex generalized eigenvalues (see the description of the argument +-* JOB.) +-* +-* If JOB='S', then the pair (A,B) is simultaneously reduced to Schur +-* form by applying one orthogonal tranformation (usually called Q) on +-* the left and another (usually called Z) on the right. 
The 2-by-2 +-* upper-triangular diagonal blocks of B corresponding to 2-by-2 blocks +-* of A will be reduced to positive diagonal matrices. (I.e., +-* if A(j+1,j) is non-zero, then B(j+1,j)=B(j,j+1)=0 and B(j,j) and +-* B(j+1,j+1) will be positive.) +-* +-* If JOB='E', then at each iteration, the same transformations +-* are computed, but they are only applied to those parts of A and B +-* which are needed to compute ALPHAR, ALPHAI, and BETAR. +-* +-* If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the orthogonal +-* transformations used to reduce (A,B) are accumulated into the arrays +-* Q and Z s.t.: +-* +-* Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)* +-* Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)* ++* SHGEQZ computes the eigenvalues of a real matrix pair (H,T), ++* where H is an upper Hessenberg matrix and T is upper triangular, ++* using the double-shift QZ method. ++* Matrix pairs of this type are produced by the reduction to ++* generalized upper Hessenberg form of a real matrix pair (A,B): ++* ++* A = Q1*H*Z1**T, B = Q1*T*Z1**T, ++* ++* as computed by SGGHRD. ++* ++* If JOB='S', then the Hessenberg-triangular pair (H,T) is ++* also reduced to generalized Schur form, ++* ++* H = Q*S*Z**T, T = Q*P*Z**T, ++* ++* where Q and Z are orthogonal matrices, P is an upper triangular ++* matrix, and S is a quasi-triangular matrix with 1-by-1 and 2-by-2 ++* diagonal blocks. ++* ++* The 1-by-1 blocks correspond to real eigenvalues of the matrix pair ++* (H,T) and the 2-by-2 blocks correspond to complex conjugate pairs of ++* eigenvalues. ++* ++* Additionally, the 2-by-2 upper triangular diagonal blocks of P ++* corresponding to 2-by-2 blocks of S are reduced to positive diagonal ++* form, i.e., if S(j+1,j) is non-zero, then P(j+1,j) = P(j,j+1) = 0, ++* P(j,j) > 0, and P(j+1,j+1) > 0. 
++* ++* Optionally, the orthogonal matrix Q from the generalized Schur ++* factorization may be postmultiplied into an input matrix Q1, and the ++* orthogonal matrix Z may be postmultiplied into an input matrix Z1. ++* If Q1 and Z1 are the orthogonal matrices from SGGHRD that reduced ++* the matrix pair (A,B) to generalized upper Hessenberg form, then the ++* output matrices Q1*Q and Z1*Z are the orthogonal factors from the ++* generalized Schur factorization of (A,B): ++* ++* A = (Q1*Q)*S*(Z1*Z)**T, B = (Q1*Q)*P*(Z1*Z)**T. ++* ++* To avoid overflow, eigenvalues of the matrix pair (H,T) (equivalently, ++* of (A,B)) are computed as a pair of values (alpha,beta), where alpha is ++* complex and beta real. ++* If beta is nonzero, lambda = alpha / beta is an eigenvalue of the ++* generalized nonsymmetric eigenvalue problem (GNEP) ++* A*x = lambda*B*x ++* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the ++* alternate form of the GNEP ++* mu*A*y = B*y. ++* Real eigenvalues can be read directly from the generalized Schur ++* form: ++* alpha = S(i,i), beta = P(i,i). + * + * Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix + * Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), +@@ -60,114 +79,98 @@ + * ========= + * + * JOB (input) CHARACTER*1 +-* = 'E': compute only ALPHAR, ALPHAI, and BETA. A and B will +-* not necessarily be put into generalized Schur form. +-* = 'S': put A and B into generalized Schur form, as well +-* as computing ALPHAR, ALPHAI, and BETA. ++* = 'E': Compute eigenvalues only; ++* = 'S': Compute eigenvalues and the Schur form. + * + * COMPQ (input) CHARACTER*1 +-* = 'N': do not modify Q. +-* = 'V': multiply the array Q on the right by the transpose of +-* the orthogonal tranformation that is applied to the +-* left side of A and B to reduce them to Schur form. +-* = 'I': like COMPQ='V', except that Q will be initialized to +-* the identity first. 
++* = 'N': Left Schur vectors (Q) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Q ++* of left Schur vectors of (H,T) is returned; ++* = 'V': Q must contain an orthogonal matrix Q1 on entry and ++* the product Q1*Q is returned. + * + * COMPZ (input) CHARACTER*1 +-* = 'N': do not modify Z. +-* = 'V': multiply the array Z on the right by the orthogonal +-* tranformation that is applied to the right side of +-* A and B to reduce them to Schur form. +-* = 'I': like COMPZ='V', except that Z will be initialized to +-* the identity first. ++* = 'N': Right Schur vectors (Z) are not computed; ++* = 'I': Z is initialized to the unit matrix and the matrix Z ++* of right Schur vectors of (H,T) is returned; ++* = 'V': Z must contain an orthogonal matrix Z1 on entry and ++* the product Z1*Z is returned. + * + * N (input) INTEGER +-* The order of the matrices A, B, Q, and Z. N >= 0. ++* The order of the matrices H, T, Q, and Z. N >= 0. + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. +-* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. +-* +-* A (input/output) REAL array, dimension (LDA, N) +-* On entry, the N-by-N upper Hessenberg matrix A. Elements +-* below the subdiagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to generalized Schur form. +-* If JOB='E', then on exit A will have been destroyed. +-* The diagonal blocks will be correct, but the off-diagonal +-* portion will be meaningless. +-* +-* LDA (input) INTEGER +-* The leading dimension of the array A. LDA >= max( 1, N ). +-* +-* B (input/output) REAL array, dimension (LDB, N) +-* On entry, the N-by-N upper triangular matrix B. Elements +-* below the diagonal must be zero. 2-by-2 blocks in B +-* corresponding to 2-by-2 blocks in A will be reduced to +-* positive diagonal form. 
(I.e., if A(j+1,j) is non-zero, +-* then B(j+1,j)=B(j,j+1)=0 and B(j,j) and B(j+1,j+1) will be +-* positive.) +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to Schur form. +-* If JOB='E', then on exit B will have been destroyed. +-* Elements corresponding to diagonal blocks of A will be +-* correct, but the off-diagonal portion will be meaningless. ++* ILO and IHI mark the rows and columns of H which are in ++* Hessenberg form. It is assumed that A is already upper ++* triangular in rows and columns 1:ILO-1 and IHI+1:N. ++* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. ++* ++* H (input/output) REAL array, dimension (LDH, N) ++* On entry, the N-by-N upper Hessenberg matrix H. ++* On exit, if JOB = 'S', H contains the upper quasi-triangular ++* matrix S from the generalized Schur factorization; ++* 2-by-2 diagonal blocks (corresponding to complex conjugate ++* pairs of eigenvalues) are returned in standard form, with ++* H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. ++* If JOB = 'E', the diagonal blocks of H match those of S, but ++* the rest of H is unspecified. ++* ++* LDH (input) INTEGER ++* The leading dimension of the array H. LDH >= max( 1, N ). ++* ++* T (input/output) REAL array, dimension (LDT, N) ++* On entry, the N-by-N upper triangular matrix T. ++* On exit, if JOB = 'S', T contains the upper triangular ++* matrix P from the generalized Schur factorization; ++* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks of S ++* are reduced to positive diagonal form, i.e., if H(j+1,j) is ++* non-zero, then T(j+1,j) = T(j,j+1) = 0, T(j,j) > 0, and ++* T(j+1,j+1) > 0. ++* If JOB = 'E', the diagonal blocks of T match those of P, but ++* the rest of T is unspecified. + * +-* LDB (input) INTEGER +-* The leading dimension of the array B. LDB >= max( 1, N ). ++* LDT (input) INTEGER ++* The leading dimension of the array T. LDT >= max( 1, N ). 
+ * + * ALPHAR (output) REAL array, dimension (N) +-* ALPHAR(1:N) will be set to real parts of the diagonal +-* elements of A that would result from reducing A and B to +-* Schur form and then further reducing them both to triangular +-* form using unitary transformations s.t. the diagonal of B +-* was non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then ALPHAR(j)=A(j,j). +-* Note that the (real or complex) values +-* (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the +-* generalized eigenvalues of the matrix pencil A - wB. ++* The real parts of each scalar alpha defining an eigenvalue ++* of GNEP. + * + * ALPHAI (output) REAL array, dimension (N) +-* ALPHAI(1:N) will be set to imaginary parts of the diagonal +-* elements of A that would result from reducing A and B to +-* Schur form and then further reducing them both to triangular +-* form using unitary transformations s.t. the diagonal of B +-* was non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then ALPHAR(j)=0. +-* Note that the (real or complex) values +-* (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the +-* generalized eigenvalues of the matrix pencil A - wB. ++* The imaginary parts of each scalar alpha defining an ++* eigenvalue of GNEP. ++* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if ++* positive, then the j-th and (j+1)-st eigenvalues are a ++* complex conjugate pair, with ALPHAI(j+1) = -ALPHAI(j). + * + * BETA (output) REAL array, dimension (N) +-* BETA(1:N) will be set to the (real) diagonal elements of B +-* that would result from reducing A and B to Schur form and +-* then further reducing them both to triangular form using +-* unitary transformations s.t. the diagonal of B was +-* non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then BETA(j)=B(j,j). 
+-*          Note that the (real or complex) values
+-*          (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the
+-*          generalized eigenvalues of the matrix pencil A - wB.
+-*          (Note that BETA(1:N) will always be non-negative, and no
+-*          BETAI is necessary.)
++*          The scalars beta that define the eigenvalues of GNEP.
++*          Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and
++*          beta = BETA(j) represent the j-th eigenvalue of the matrix
++*          pair (A,B), in one of the forms lambda = alpha/beta or
++*          mu = beta/alpha.  Since either lambda or mu may overflow,
++*          they should not, in general, be computed.
+ *
+ *  Q       (input/output) REAL array, dimension (LDQ, N)
+-*          If COMPQ='N', then Q will not be referenced.
+-*          If COMPQ='V' or 'I', then the transpose of the orthogonal
+-*          transformations which are applied to A and B on the left
+-*          will be applied to the array Q on the right.
++*          On entry, if COMPQ = 'V', the orthogonal matrix Q1 used in
++*          the reduction of (A,B) to generalized Hessenberg form.
++*          On exit, if COMPQ = 'I', the orthogonal matrix of left Schur
++*          vectors of (H,T), and if COMPQ = 'V', the orthogonal matrix
++*          of left Schur vectors of (A,B).
++*          Not referenced if COMPQ = 'N'.
+ *
+ *  LDQ     (input) INTEGER
+ *          The leading dimension of the array Q.  LDQ >= 1.
+ *          If COMPQ='V' or 'I', then LDQ >= N.
+ *
+ *  Z       (input/output) REAL array, dimension (LDZ, N)
+-*          If COMPZ='N', then Z will not be referenced.
+-*          If COMPZ='V' or 'I', then the orthogonal transformations
+-*          which are applied to A and B on the right will be applied
+-*          to the array Z on the right.
++*          On entry, if COMPZ = 'V', the orthogonal matrix Z1 used in
++*          the reduction of (A,B) to generalized Hessenberg form.
++*          On exit, if COMPZ = 'I', the orthogonal matrix of
++*          right Schur vectors of (H,T), and if COMPZ = 'V', the
++*          orthogonal matrix of right Schur vectors of (A,B).
++*          Not referenced if COMPZ = 'N'.
+ *
+ *  LDZ     (input) INTEGER
+ *          The leading dimension of the array Z.  LDZ >= 1.
+@@ -187,13 +190,12 @@ + * INFO (output) INTEGER + * = 0: successful exit + * < 0: if INFO = -i, the i-th argument had an illegal value +-* = 1,...,N: the QZ iteration did not converge. (A,B) is not ++* = 1,...,N: the QZ iteration did not converge. (H,T) is not + * in Schur form, but ALPHAR(i), ALPHAI(i), and + * BETA(i), i=INFO+1,...,N should be correct. +-* = N+1,...,2*N: the shift calculation failed. (A,B) is not ++* = N+1,...,2*N: the shift calculation failed. (H,T) is not + * in Schur form, but ALPHAR(i), ALPHAI(i), and + * BETA(i), i=INFO-N+1,...,N should be correct. +-* > 2*N: various "impossible" errors. + * + * Further Details + * =============== +@@ -225,7 +227,7 @@ + $ B1R, B22, B2A, B2I, B2R, BN, BNORM, BSCALE, + $ BTOL, C, C11I, C11R, C12, C21, C22I, C22R, CL, + $ CQ, CR, CZ, ESHIFT, S, S1, S1INV, S2, SAFMAX, +- $ SAFMIN, SCALE, SL, SQI, SQR, SR, SZI, SZR, T, ++ $ SAFMIN, SCALE, SL, SQI, SQR, SR, SZI, SZR, T1, + $ TAU, TEMP, TEMP2, TEMPI, TEMPR, U1, U12, U12L, + $ U2, ULP, VS, W11, W12, W21, W22, WABS, WI, WR, + $ WR2 +@@ -302,9 +304,9 @@ + INFO = -5 + ELSE IF( IHI.GT.N .OR. IHI.LT.ILO-1 ) THEN + INFO = -6 +- ELSE IF( LDA.LT.N ) THEN ++ ELSE IF( LDH.LT.N ) THEN + INFO = -8 +- ELSE IF( LDB.LT.N ) THEN ++ ELSE IF( LDT.LT.N ) THEN + INFO = -10 + ELSE IF( LDQ.LT.1 .OR. ( ILQ .AND. 
LDQ.LT.N ) ) THEN + INFO = -15 +@@ -340,8 +342,8 @@ + SAFMIN = SLAMCH( 'S' ) + SAFMAX = ONE / SAFMIN + ULP = SLAMCH( 'E' )*SLAMCH( 'B' ) +- ANORM = SLANHS( 'F', IN, A( ILO, ILO ), LDA, WORK ) +- BNORM = SLANHS( 'F', IN, B( ILO, ILO ), LDB, WORK ) ++ ANORM = SLANHS( 'F', IN, H( ILO, ILO ), LDH, WORK ) ++ BNORM = SLANHS( 'F', IN, T( ILO, ILO ), LDT, WORK ) + ATOL = MAX( SAFMIN, ULP*ANORM ) + BTOL = MAX( SAFMIN, ULP*BNORM ) + ASCALE = ONE / MAX( SAFMIN, ANORM ) +@@ -350,15 +352,15 @@ + * Set Eigenvalues IHI+1:N + * + DO 30 J = IHI + 1, N +- IF( B( J, J ).LT.ZERO ) THEN ++ IF( T( J, J ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 10 JR = 1, J +- A( JR, J ) = -A( JR, J ) +- B( JR, J ) = -B( JR, J ) ++ H( JR, J ) = -H( JR, J ) ++ T( JR, J ) = -T( JR, J ) + 10 CONTINUE + ELSE +- A( J, J ) = -A( J, J ) +- B( J, J ) = -B( J, J ) ++ H( J, J ) = -H( J, J ) ++ T( J, J ) = -T( J, J ) + END IF + IF( ILZ ) THEN + DO 20 JR = 1, N +@@ -366,9 +368,9 @@ + 20 CONTINUE + END IF + END IF +- ALPHAR( J ) = A( J, J ) ++ ALPHAR( J ) = H( J, J ) + ALPHAI( J ) = ZERO +- BETA( J ) = B( J, J ) ++ BETA( J ) = T( J, J ) + 30 CONTINUE + * + * If IHI < ILO, skip QZ steps +@@ -408,8 +410,8 @@ + * Split the matrix if possible. + * + * Two tests: +-* 1: A(j,j-1)=0 or j=ILO +-* 2: B(j,j)=0 ++* 1: H(j,j-1)=0 or j=ILO ++* 2: T(j,j)=0 + * + IF( ILAST.EQ.ILO ) THEN + * +@@ -417,14 +419,14 @@ + * + GO TO 80 + ELSE +- IF( ABS( A( ILAST, ILAST-1 ) ).LE.ATOL ) THEN +- A( ILAST, ILAST-1 ) = ZERO ++ IF( ABS( H( ILAST, ILAST-1 ) ).LE.ATOL ) THEN ++ H( ILAST, ILAST-1 ) = ZERO + GO TO 80 + END IF + END IF + * +- IF( ABS( B( ILAST, ILAST ) ).LE.BTOL ) THEN +- B( ILAST, ILAST ) = ZERO ++ IF( ABS( T( ILAST, ILAST ) ).LE.BTOL ) THEN ++ T( ILAST, ILAST ) = ZERO + GO TO 70 + END IF + * +@@ -432,36 +434,36 @@ + * + DO 60 J = ILAST - 1, ILO, -1 + * +-* Test 1: for A(j,j-1)=0 or j=ILO ++* Test 1: for H(j,j-1)=0 or j=ILO + * + IF( J.EQ.ILO ) THEN + ILAZRO = .TRUE. 
+ ELSE +- IF( ABS( A( J, J-1 ) ).LE.ATOL ) THEN +- A( J, J-1 ) = ZERO ++ IF( ABS( H( J, J-1 ) ).LE.ATOL ) THEN ++ H( J, J-1 ) = ZERO + ILAZRO = .TRUE. + ELSE + ILAZRO = .FALSE. + END IF + END IF + * +-* Test 2: for B(j,j)=0 ++* Test 2: for T(j,j)=0 + * +- IF( ABS( B( J, J ) ).LT.BTOL ) THEN +- B( J, J ) = ZERO ++ IF( ABS( T( J, J ) ).LT.BTOL ) THEN ++ T( J, J ) = ZERO + * + * Test 1a: Check for 2 consecutive small subdiagonals in A + * + ILAZR2 = .FALSE. + IF( .NOT.ILAZRO ) THEN +- TEMP = ABS( A( J, J-1 ) ) +- TEMP2 = ABS( A( J, J ) ) ++ TEMP = ABS( H( J, J-1 ) ) ++ TEMP2 = ABS( H( J, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( TEMP*( ASCALE*ABS( A( J+1, J ) ) ).LE.TEMP2* ++ IF( TEMP*( ASCALE*ABS( H( J+1, J ) ) ).LE.TEMP2* + $ ( ASCALE*ATOL ) )ILAZR2 = .TRUE. + END IF + * +@@ -473,21 +475,21 @@ + * + IF( ILAZRO .OR. ILAZR2 ) THEN + DO 40 JCH = J, ILAST - 1 +- TEMP = A( JCH, JCH ) +- CALL SLARTG( TEMP, A( JCH+1, JCH ), C, S, +- $ A( JCH, JCH ) ) +- A( JCH+1, JCH ) = ZERO +- CALL SROT( ILASTM-JCH, A( JCH, JCH+1 ), LDA, +- $ A( JCH+1, JCH+1 ), LDA, C, S ) +- CALL SROT( ILASTM-JCH, B( JCH, JCH+1 ), LDB, +- $ B( JCH+1, JCH+1 ), LDB, C, S ) ++ TEMP = H( JCH, JCH ) ++ CALL SLARTG( TEMP, H( JCH+1, JCH ), C, S, ++ $ H( JCH, JCH ) ) ++ H( JCH+1, JCH ) = ZERO ++ CALL SROT( ILASTM-JCH, H( JCH, JCH+1 ), LDH, ++ $ H( JCH+1, JCH+1 ), LDH, C, S ) ++ CALL SROT( ILASTM-JCH, T( JCH, JCH+1 ), LDT, ++ $ T( JCH+1, JCH+1 ), LDT, C, S ) + IF( ILQ ) + $ CALL SROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, S ) + IF( ILAZR2 ) +- $ A( JCH, JCH-1 ) = A( JCH, JCH-1 )*C ++ $ H( JCH, JCH-1 ) = H( JCH, JCH-1 )*C + ILAZR2 = .FALSE. 
+- IF( ABS( B( JCH+1, JCH+1 ) ).GE.BTOL ) THEN ++ IF( ABS( T( JCH+1, JCH+1 ) ).GE.BTOL ) THEN + IF( JCH+1.GE.ILAST ) THEN + GO TO 80 + ELSE +@@ -495,35 +497,35 @@ + GO TO 110 + END IF + END IF +- B( JCH+1, JCH+1 ) = ZERO ++ T( JCH+1, JCH+1 ) = ZERO + 40 CONTINUE + GO TO 70 + ELSE + * +-* Only test 2 passed -- chase the zero to B(ILAST,ILAST) +-* Then process as in the case B(ILAST,ILAST)=0 ++* Only test 2 passed -- chase the zero to T(ILAST,ILAST) ++* Then process as in the case T(ILAST,ILAST)=0 + * + DO 50 JCH = J, ILAST - 1 +- TEMP = B( JCH, JCH+1 ) +- CALL SLARTG( TEMP, B( JCH+1, JCH+1 ), C, S, +- $ B( JCH, JCH+1 ) ) +- B( JCH+1, JCH+1 ) = ZERO ++ TEMP = T( JCH, JCH+1 ) ++ CALL SLARTG( TEMP, T( JCH+1, JCH+1 ), C, S, ++ $ T( JCH, JCH+1 ) ) ++ T( JCH+1, JCH+1 ) = ZERO + IF( JCH.LT.ILASTM-1 ) +- $ CALL SROT( ILASTM-JCH-1, B( JCH, JCH+2 ), LDB, +- $ B( JCH+1, JCH+2 ), LDB, C, S ) +- CALL SROT( ILASTM-JCH+2, A( JCH, JCH-1 ), LDA, +- $ A( JCH+1, JCH-1 ), LDA, C, S ) ++ $ CALL SROT( ILASTM-JCH-1, T( JCH, JCH+2 ), LDT, ++ $ T( JCH+1, JCH+2 ), LDT, C, S ) ++ CALL SROT( ILASTM-JCH+2, H( JCH, JCH-1 ), LDH, ++ $ H( JCH+1, JCH-1 ), LDH, C, S ) + IF( ILQ ) + $ CALL SROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, S ) +- TEMP = A( JCH+1, JCH ) +- CALL SLARTG( TEMP, A( JCH+1, JCH-1 ), C, S, +- $ A( JCH+1, JCH ) ) +- A( JCH+1, JCH-1 ) = ZERO +- CALL SROT( JCH+1-IFRSTM, A( IFRSTM, JCH ), 1, +- $ A( IFRSTM, JCH-1 ), 1, C, S ) +- CALL SROT( JCH-IFRSTM, B( IFRSTM, JCH ), 1, +- $ B( IFRSTM, JCH-1 ), 1, C, S ) ++ TEMP = H( JCH+1, JCH ) ++ CALL SLARTG( TEMP, H( JCH+1, JCH-1 ), C, S, ++ $ H( JCH+1, JCH ) ) ++ H( JCH+1, JCH-1 ) = ZERO ++ CALL SROT( JCH+1-IFRSTM, H( IFRSTM, JCH ), 1, ++ $ H( IFRSTM, JCH-1 ), 1, C, S ) ++ CALL SROT( JCH-IFRSTM, T( IFRSTM, JCH ), 1, ++ $ T( IFRSTM, JCH-1 ), 1, C, S ) + IF( ILZ ) + $ CALL SROT( N, Z( 1, JCH ), 1, Z( 1, JCH-1 ), 1, + $ C, S ) +@@ -547,34 +549,34 @@ + INFO = N + 1 + GO TO 420 + * +-* B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a 
++* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a + * 1x1 block. + * + 70 CONTINUE +- TEMP = A( ILAST, ILAST ) +- CALL SLARTG( TEMP, A( ILAST, ILAST-1 ), C, S, +- $ A( ILAST, ILAST ) ) +- A( ILAST, ILAST-1 ) = ZERO +- CALL SROT( ILAST-IFRSTM, A( IFRSTM, ILAST ), 1, +- $ A( IFRSTM, ILAST-1 ), 1, C, S ) +- CALL SROT( ILAST-IFRSTM, B( IFRSTM, ILAST ), 1, +- $ B( IFRSTM, ILAST-1 ), 1, C, S ) ++ TEMP = H( ILAST, ILAST ) ++ CALL SLARTG( TEMP, H( ILAST, ILAST-1 ), C, S, ++ $ H( ILAST, ILAST ) ) ++ H( ILAST, ILAST-1 ) = ZERO ++ CALL SROT( ILAST-IFRSTM, H( IFRSTM, ILAST ), 1, ++ $ H( IFRSTM, ILAST-1 ), 1, C, S ) ++ CALL SROT( ILAST-IFRSTM, T( IFRSTM, ILAST ), 1, ++ $ T( IFRSTM, ILAST-1 ), 1, C, S ) + IF( ILZ ) + $ CALL SROT( N, Z( 1, ILAST ), 1, Z( 1, ILAST-1 ), 1, C, S ) + * +-* A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHAR, ALPHAI, ++* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHAR, ALPHAI, + * and BETA + * + 80 CONTINUE +- IF( B( ILAST, ILAST ).LT.ZERO ) THEN ++ IF( T( ILAST, ILAST ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 90 J = IFRSTM, ILAST +- A( J, ILAST ) = -A( J, ILAST ) +- B( J, ILAST ) = -B( J, ILAST ) ++ H( J, ILAST ) = -H( J, ILAST ) ++ T( J, ILAST ) = -T( J, ILAST ) + 90 CONTINUE + ELSE +- A( ILAST, ILAST ) = -A( ILAST, ILAST ) +- B( ILAST, ILAST ) = -B( ILAST, ILAST ) ++ H( ILAST, ILAST ) = -H( ILAST, ILAST ) ++ T( ILAST, ILAST ) = -T( ILAST, ILAST ) + END IF + IF( ILZ ) THEN + DO 100 J = 1, N +@@ -582,9 +584,9 @@ + 100 CONTINUE + END IF + END IF +- ALPHAR( ILAST ) = A( ILAST, ILAST ) ++ ALPHAR( ILAST ) = H( ILAST, ILAST ) + ALPHAI( ILAST ) = ZERO +- BETA( ILAST ) = B( ILAST, ILAST ) ++ BETA( ILAST ) = T( ILAST, ILAST ) + * + * Go to next block -- exit if finished. + * +@@ -617,7 +619,7 @@ + * Compute single shifts. 
+ * + * At this point, IFIRST < ILAST, and the diagonal elements of +-* B(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in ++* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in + * magnitude) + * + IF( ( IITER / 10 )*10.EQ.IITER ) THEN +@@ -625,10 +627,10 @@ + * Exceptional shift. Chosen for no particularly good reason. + * (Single shift only.) + * +- IF( ( REAL( MAXIT )*SAFMIN )*ABS( A( ILAST-1, ILAST ) ).LT. +- $ ABS( B( ILAST-1, ILAST-1 ) ) ) THEN +- ESHIFT = ESHIFT + A( ILAST-1, ILAST ) / +- $ B( ILAST-1, ILAST-1 ) ++ IF( ( REAL( MAXIT )*SAFMIN )*ABS( H( ILAST-1, ILAST ) ).LT. ++ $ ABS( T( ILAST-1, ILAST-1 ) ) ) THEN ++ ESHIFT = ESHIFT + H( ILAST-1, ILAST ) / ++ $ T( ILAST-1, ILAST-1 ) + ELSE + ESHIFT = ESHIFT + ONE / ( SAFMIN*REAL( MAXIT ) ) + END IF +@@ -641,8 +643,8 @@ + * bottom-right 2x2 block of A and B. The first eigenvalue + * returned by SLAG2 is the Wilkinson shift (AEP p.512), + * +- CALL SLAG2( A( ILAST-1, ILAST-1 ), LDA, +- $ B( ILAST-1, ILAST-1 ), LDB, SAFMIN*SAFETY, S1, ++ CALL SLAG2( H( ILAST-1, ILAST-1 ), LDH, ++ $ T( ILAST-1, ILAST-1 ), LDT, SAFMIN*SAFETY, S1, + $ S2, WR, WR2, WI ) + * + TEMP = MAX( S1, SAFMIN*MAX( ONE, ABS( WR ), ABS( WI ) ) ) +@@ -669,14 +671,14 @@ + * + DO 120 J = ILAST - 1, IFIRST + 1, -1 + ISTART = J +- TEMP = ABS( S1*A( J, J-1 ) ) +- TEMP2 = ABS( S1*A( J, J )-WR*B( J, J ) ) ++ TEMP = ABS( S1*H( J, J-1 ) ) ++ TEMP2 = ABS( S1*H( J, J )-WR*T( J, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. 
TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( ABS( ( ASCALE*A( J+1, J ) )*TEMP ).LE.( ASCALE*ATOL )* ++ IF( ABS( ( ASCALE*H( J+1, J ) )*TEMP ).LE.( ASCALE*ATOL )* + $ TEMP2 )GO TO 130 + 120 CONTINUE + * +@@ -687,26 +689,26 @@ + * + * Initial Q + * +- TEMP = S1*A( ISTART, ISTART ) - WR*B( ISTART, ISTART ) +- TEMP2 = S1*A( ISTART+1, ISTART ) ++ TEMP = S1*H( ISTART, ISTART ) - WR*T( ISTART, ISTART ) ++ TEMP2 = S1*H( ISTART+1, ISTART ) + CALL SLARTG( TEMP, TEMP2, C, S, TEMPR ) + * + * Sweep + * + DO 190 J = ISTART, ILAST - 1 + IF( J.GT.ISTART ) THEN +- TEMP = A( J, J-1 ) +- CALL SLARTG( TEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = ZERO ++ TEMP = H( J, J-1 ) ++ CALL SLARTG( TEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = ZERO + END IF + * + DO 140 JC = J, ILASTM +- TEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -S*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = TEMP +- TEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, JC ) = -S*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = TEMP2 ++ TEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -S*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = TEMP ++ TEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -S*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = TEMP2 + 140 CONTINUE + IF( ILQ ) THEN + DO 150 JR = 1, N +@@ -716,19 +718,19 @@ + 150 CONTINUE + END IF + * +- TEMP = B( J+1, J+1 ) +- CALL SLARTG( TEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = ZERO ++ TEMP = T( J+1, J+1 ) ++ CALL SLARTG( TEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = ZERO + * + DO 160 JR = IFRSTM, MIN( J+2, ILAST ) +- TEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -S*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = TEMP ++ TEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -S*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = TEMP + 160 CONTINUE + DO 170 JR = IFRSTM, J +- TEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -S*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = TEMP ++ 
TEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -S*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = TEMP + 170 CONTINUE + IF( ILZ ) THEN + DO 180 JR = 1, N +@@ -759,8 +761,8 @@ + * B = ( ) with B11 non-negative. + * ( 0 B22 ) + * +- CALL SLASV2( B( ILAST-1, ILAST-1 ), B( ILAST-1, ILAST ), +- $ B( ILAST, ILAST ), B22, B11, SR, CR, SL, CL ) ++ CALL SLASV2( T( ILAST-1, ILAST-1 ), T( ILAST-1, ILAST ), ++ $ T( ILAST, ILAST ), B22, B11, SR, CR, SL, CL ) + * + IF( B11.LT.ZERO ) THEN + CR = -CR +@@ -769,17 +771,17 @@ + B22 = -B22 + END IF + * +- CALL SROT( ILASTM+1-IFIRST, A( ILAST-1, ILAST-1 ), LDA, +- $ A( ILAST, ILAST-1 ), LDA, CL, SL ) +- CALL SROT( ILAST+1-IFRSTM, A( IFRSTM, ILAST-1 ), 1, +- $ A( IFRSTM, ILAST ), 1, CR, SR ) ++ CALL SROT( ILASTM+1-IFIRST, H( ILAST-1, ILAST-1 ), LDH, ++ $ H( ILAST, ILAST-1 ), LDH, CL, SL ) ++ CALL SROT( ILAST+1-IFRSTM, H( IFRSTM, ILAST-1 ), 1, ++ $ H( IFRSTM, ILAST ), 1, CR, SR ) + * + IF( ILAST.LT.ILASTM ) +- $ CALL SROT( ILASTM-ILAST, B( ILAST-1, ILAST+1 ), LDB, +- $ B( ILAST, ILAST+1 ), LDA, CL, SL ) ++ $ CALL SROT( ILASTM-ILAST, T( ILAST-1, ILAST+1 ), LDT, ++ $ T( ILAST, ILAST+1 ), LDH, CL, SL ) + IF( IFRSTM.LT.ILAST-1 ) +- $ CALL SROT( IFIRST-IFRSTM, B( IFRSTM, ILAST-1 ), 1, +- $ B( IFRSTM, ILAST ), 1, CR, SR ) ++ $ CALL SROT( IFIRST-IFRSTM, T( IFRSTM, ILAST-1 ), 1, ++ $ T( IFRSTM, ILAST ), 1, CR, SR ) + * + IF( ILQ ) + $ CALL SROT( N, Q( 1, ILAST-1 ), 1, Q( 1, ILAST ), 1, CL, +@@ -788,17 +790,17 @@ + $ CALL SROT( N, Z( 1, ILAST-1 ), 1, Z( 1, ILAST ), 1, CR, + $ SR ) + * +- B( ILAST-1, ILAST-1 ) = B11 +- B( ILAST-1, ILAST ) = ZERO +- B( ILAST, ILAST-1 ) = ZERO +- B( ILAST, ILAST ) = B22 ++ T( ILAST-1, ILAST-1 ) = B11 ++ T( ILAST-1, ILAST ) = ZERO ++ T( ILAST, ILAST-1 ) = ZERO ++ T( ILAST, ILAST ) = B22 + * + * If B22 is negative, negate column ILAST + * + IF( B22.LT.ZERO ) THEN + DO 210 J = IFRSTM, ILAST +- A( J, ILAST ) = -A( J, ILAST ) +- B( J, ILAST ) = -B( J, ILAST ) ++ H( J, ILAST ) = -H( J, ILAST ) ++ T( J, ILAST ) = -T( 
J, ILAST ) + 210 CONTINUE + * + IF( ILZ ) THEN +@@ -812,8 +814,8 @@ + * + * Recompute shift + * +- CALL SLAG2( A( ILAST-1, ILAST-1 ), LDA, +- $ B( ILAST-1, ILAST-1 ), LDB, SAFMIN*SAFETY, S1, ++ CALL SLAG2( H( ILAST-1, ILAST-1 ), LDH, ++ $ T( ILAST-1, ILAST-1 ), LDT, SAFMIN*SAFETY, S1, + $ TEMP, WR, TEMP2, WI ) + * + * If standardization has perturbed the shift onto real line, +@@ -825,10 +827,10 @@ + * + * Do EISPACK (QZVAL) computation of alpha and beta + * +- A11 = A( ILAST-1, ILAST-1 ) +- A21 = A( ILAST, ILAST-1 ) +- A12 = A( ILAST-1, ILAST ) +- A22 = A( ILAST, ILAST ) ++ A11 = H( ILAST-1, ILAST-1 ) ++ A21 = H( ILAST, ILAST-1 ) ++ A12 = H( ILAST-1, ILAST ) ++ A22 = H( ILAST, ILAST ) + * + * Compute complex Givens rotation on right + * (Assume some element of C = (sA - wB) > unfl ) +@@ -845,10 +847,10 @@ + * + IF( ABS( C11R )+ABS( C11I )+ABS( C12 ).GT.ABS( C21 )+ + $ ABS( C22R )+ABS( C22I ) ) THEN +- T = SLAPY3( C12, C11R, C11I ) +- CZ = C12 / T +- SZR = -C11R / T +- SZI = -C11I / T ++ T1 = SLAPY3( C12, C11R, C11I ) ++ CZ = C12 / T1 ++ SZR = -C11R / T1 ++ SZI = -C11I / T1 + ELSE + CZ = SLAPY2( C22R, C22I ) + IF( CZ.LE.SAFMIN ) THEN +@@ -858,10 +860,10 @@ + ELSE + TEMPR = C22R / CZ + TEMPI = C22I / CZ +- T = SLAPY2( CZ, C21 ) +- CZ = CZ / T +- SZR = -C21*TEMPR / T +- SZI = C21*TEMPI / T ++ T1 = SLAPY2( CZ, C21 ) ++ CZ = CZ / T1 ++ SZR = -C21*TEMPR / T1 ++ SZI = C21*TEMPI / T1 + END IF + END IF + * +@@ -895,10 +897,10 @@ + SQI = TEMPI*A2R - TEMPR*A2I + END IF + END IF +- T = SLAPY3( CQ, SQR, SQI ) +- CQ = CQ / T +- SQR = SQR / T +- SQI = SQI / T ++ T1 = SLAPY3( CQ, SQR, SQI ) ++ CQ = CQ / T1 ++ SQR = SQR / T1 ++ SQI = SQI / T1 + * + * Compute diagonal elements of QBZ + * +@@ -950,26 +952,26 @@ + * + * We assume that the block is at least 3x3 + * +- AD11 = ( ASCALE*A( ILAST-1, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD21 = ( ASCALE*A( ILAST, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD12 = ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( 
ILAST, ILAST ) ) +- AD22 = ( ASCALE*A( ILAST, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- U12 = B( ILAST-1, ILAST ) / B( ILAST, ILAST ) +- AD11L = ( ASCALE*A( IFIRST, IFIRST ) ) / +- $ ( BSCALE*B( IFIRST, IFIRST ) ) +- AD21L = ( ASCALE*A( IFIRST+1, IFIRST ) ) / +- $ ( BSCALE*B( IFIRST, IFIRST ) ) +- AD12L = ( ASCALE*A( IFIRST, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- AD22L = ( ASCALE*A( IFIRST+1, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- AD32L = ( ASCALE*A( IFIRST+2, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- U12L = B( IFIRST, IFIRST+1 ) / B( IFIRST+1, IFIRST+1 ) ++ AD11 = ( ASCALE*H( ILAST-1, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD21 = ( ASCALE*H( ILAST, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD12 = ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD22 = ( ASCALE*H( ILAST, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ U12 = T( ILAST-1, ILAST ) / T( ILAST, ILAST ) ++ AD11L = ( ASCALE*H( IFIRST, IFIRST ) ) / ++ $ ( BSCALE*T( IFIRST, IFIRST ) ) ++ AD21L = ( ASCALE*H( IFIRST+1, IFIRST ) ) / ++ $ ( BSCALE*T( IFIRST, IFIRST ) ) ++ AD12L = ( ASCALE*H( IFIRST, IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ AD22L = ( ASCALE*H( IFIRST+1, IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ AD32L = ( ASCALE*H( IFIRST+2, IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ U12L = T( IFIRST, IFIRST+1 ) / T( IFIRST+1, IFIRST+1 ) + * + V( 1 ) = ( AD11-AD11L )*( AD22-AD11L ) - AD12*AD21 + + $ AD21*U12*AD11L + ( AD12L-AD11L*U12L )*AD21L +@@ -991,27 +993,27 @@ + * Zero (j-1)st column of A + * + IF( J.GT.ISTART ) THEN +- V( 1 ) = A( J, J-1 ) +- V( 2 ) = A( J+1, J-1 ) +- V( 3 ) = A( J+2, J-1 ) ++ V( 1 ) = H( J, J-1 ) ++ V( 2 ) = H( J+1, J-1 ) ++ V( 3 ) = H( J+2, J-1 ) + * +- CALL SLARFG( 3, A( J, J-1 ), V( 2 ), 1, TAU ) ++ CALL SLARFG( 3, H( J, J-1 ), V( 2 ), 1, TAU ) + V( 1 ) = ONE +- A( J+1, J-1 ) = ZERO +- A( J+2, J-1 ) = ZERO ++ H( J+1, J-1 ) = ZERO 
++ H( J+2, J-1 ) = ZERO + END IF + * + DO 230 JC = J, ILASTM +- TEMP = TAU*( A( J, JC )+V( 2 )*A( J+1, JC )+V( 3 )* +- $ A( J+2, JC ) ) +- A( J, JC ) = A( J, JC ) - TEMP +- A( J+1, JC ) = A( J+1, JC ) - TEMP*V( 2 ) +- A( J+2, JC ) = A( J+2, JC ) - TEMP*V( 3 ) +- TEMP2 = TAU*( B( J, JC )+V( 2 )*B( J+1, JC )+V( 3 )* +- $ B( J+2, JC ) ) +- B( J, JC ) = B( J, JC ) - TEMP2 +- B( J+1, JC ) = B( J+1, JC ) - TEMP2*V( 2 ) +- B( J+2, JC ) = B( J+2, JC ) - TEMP2*V( 3 ) ++ TEMP = TAU*( H( J, JC )+V( 2 )*H( J+1, JC )+V( 3 )* ++ $ H( J+2, JC ) ) ++ H( J, JC ) = H( J, JC ) - TEMP ++ H( J+1, JC ) = H( J+1, JC ) - TEMP*V( 2 ) ++ H( J+2, JC ) = H( J+2, JC ) - TEMP*V( 3 ) ++ TEMP2 = TAU*( T( J, JC )+V( 2 )*T( J+1, JC )+V( 3 )* ++ $ T( J+2, JC ) ) ++ T( J, JC ) = T( J, JC ) - TEMP2 ++ T( J+1, JC ) = T( J+1, JC ) - TEMP2*V( 2 ) ++ T( J+2, JC ) = T( J+2, JC ) - TEMP2*V( 3 ) + 230 CONTINUE + IF( ILQ ) THEN + DO 240 JR = 1, N +@@ -1028,27 +1030,27 @@ + * Swap rows to pivot + * + ILPIVT = .FALSE. +- TEMP = MAX( ABS( B( J+1, J+1 ) ), ABS( B( J+1, J+2 ) ) ) +- TEMP2 = MAX( ABS( B( J+2, J+1 ) ), ABS( B( J+2, J+2 ) ) ) ++ TEMP = MAX( ABS( T( J+1, J+1 ) ), ABS( T( J+1, J+2 ) ) ) ++ TEMP2 = MAX( ABS( T( J+2, J+1 ) ), ABS( T( J+2, J+2 ) ) ) + IF( MAX( TEMP, TEMP2 ).LT.SAFMIN ) THEN + SCALE = ZERO + U1 = ONE + U2 = ZERO + GO TO 250 + ELSE IF( TEMP.GE.TEMP2 ) THEN +- W11 = B( J+1, J+1 ) +- W21 = B( J+2, J+1 ) +- W12 = B( J+1, J+2 ) +- W22 = B( J+2, J+2 ) +- U1 = B( J+1, J ) +- U2 = B( J+2, J ) ++ W11 = T( J+1, J+1 ) ++ W21 = T( J+2, J+1 ) ++ W12 = T( J+1, J+2 ) ++ W22 = T( J+2, J+2 ) ++ U1 = T( J+1, J ) ++ U2 = T( J+2, J ) + ELSE +- W21 = B( J+1, J+1 ) +- W11 = B( J+2, J+1 ) +- W22 = B( J+1, J+2 ) +- W12 = B( J+2, J+2 ) +- U2 = B( J+1, J ) +- U1 = B( J+2, J ) ++ W21 = T( J+1, J+1 ) ++ W11 = T( J+2, J+1 ) ++ W22 = T( J+1, J+2 ) ++ W12 = T( J+2, J+2 ) ++ U2 = T( J+1, J ) ++ U1 = T( J+2, J ) + END IF + * + * Swap columns if nec. 
+@@ -1098,9 +1100,9 @@ + * + * Compute Householder Vector + * +- T = SQRT( SCALE**2+U1**2+U2**2 ) +- TAU = ONE + SCALE / T +- VS = -ONE / ( SCALE+T ) ++ T1 = SQRT( SCALE**2+U1**2+U2**2 ) ++ TAU = ONE + SCALE / T1 ++ VS = -ONE / ( SCALE+T1 ) + V( 1 ) = ONE + V( 2 ) = VS*U1 + V( 3 ) = VS*U2 +@@ -1108,18 +1110,18 @@ + * Apply transformations from the right. + * + DO 260 JR = IFRSTM, MIN( J+3, ILAST ) +- TEMP = TAU*( A( JR, J )+V( 2 )*A( JR, J+1 )+V( 3 )* +- $ A( JR, J+2 ) ) +- A( JR, J ) = A( JR, J ) - TEMP +- A( JR, J+1 ) = A( JR, J+1 ) - TEMP*V( 2 ) +- A( JR, J+2 ) = A( JR, J+2 ) - TEMP*V( 3 ) ++ TEMP = TAU*( H( JR, J )+V( 2 )*H( JR, J+1 )+V( 3 )* ++ $ H( JR, J+2 ) ) ++ H( JR, J ) = H( JR, J ) - TEMP ++ H( JR, J+1 ) = H( JR, J+1 ) - TEMP*V( 2 ) ++ H( JR, J+2 ) = H( JR, J+2 ) - TEMP*V( 3 ) + 260 CONTINUE + DO 270 JR = IFRSTM, J + 2 +- TEMP = TAU*( B( JR, J )+V( 2 )*B( JR, J+1 )+V( 3 )* +- $ B( JR, J+2 ) ) +- B( JR, J ) = B( JR, J ) - TEMP +- B( JR, J+1 ) = B( JR, J+1 ) - TEMP*V( 2 ) +- B( JR, J+2 ) = B( JR, J+2 ) - TEMP*V( 3 ) ++ TEMP = TAU*( T( JR, J )+V( 2 )*T( JR, J+1 )+V( 3 )* ++ $ T( JR, J+2 ) ) ++ T( JR, J ) = T( JR, J ) - TEMP ++ T( JR, J+1 ) = T( JR, J+1 ) - TEMP*V( 2 ) ++ T( JR, J+2 ) = T( JR, J+2 ) - TEMP*V( 3 ) + 270 CONTINUE + IF( ILZ ) THEN + DO 280 JR = 1, N +@@ -1130,8 +1132,8 @@ + Z( JR, J+2 ) = Z( JR, J+2 ) - TEMP*V( 3 ) + 280 CONTINUE + END IF +- B( J+1, J ) = ZERO +- B( J+2, J ) = ZERO ++ T( J+1, J ) = ZERO ++ T( J+2, J ) = ZERO + 290 CONTINUE + * + * Last elements: Use Givens rotations +@@ -1139,17 +1141,17 @@ + * Rotations from the left + * + J = ILAST - 1 +- TEMP = A( J, J-1 ) +- CALL SLARTG( TEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = ZERO ++ TEMP = H( J, J-1 ) ++ CALL SLARTG( TEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = ZERO + * + DO 300 JC = J, ILASTM +- TEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -S*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = TEMP +- TEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, 
JC ) = -S*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = TEMP2 ++ TEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -S*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = TEMP ++ TEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -S*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = TEMP2 + 300 CONTINUE + IF( ILQ ) THEN + DO 310 JR = 1, N +@@ -1161,19 +1163,19 @@ + * + * Rotations from the right. + * +- TEMP = B( J+1, J+1 ) +- CALL SLARTG( TEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = ZERO ++ TEMP = T( J+1, J+1 ) ++ CALL SLARTG( TEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = ZERO + * + DO 320 JR = IFRSTM, ILAST +- TEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -S*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = TEMP ++ TEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -S*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = TEMP + 320 CONTINUE + DO 330 JR = IFRSTM, ILAST - 1 +- TEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -S*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = TEMP ++ TEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -S*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = TEMP + 330 CONTINUE + IF( ILZ ) THEN + DO 340 JR = 1, N +@@ -1207,15 +1209,15 @@ + * Set Eigenvalues 1:ILO-1 + * + DO 410 J = 1, ILO - 1 +- IF( B( J, J ).LT.ZERO ) THEN ++ IF( T( J, J ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 390 JR = 1, J +- A( JR, J ) = -A( JR, J ) +- B( JR, J ) = -B( JR, J ) ++ H( JR, J ) = -H( JR, J ) ++ T( JR, J ) = -T( JR, J ) + 390 CONTINUE + ELSE +- A( J, J ) = -A( J, J ) +- B( J, J ) = -B( J, J ) ++ H( J, J ) = -H( J, J ) ++ T( J, J ) = -T( J, J ) + END IF + IF( ILZ ) THEN + DO 400 JR = 1, N +@@ -1223,9 +1225,9 @@ + 400 CONTINUE + END IF + END IF +- ALPHAR( J ) = A( J, J ) ++ ALPHAR( J ) = H( J, J ) + ALPHAI( J ) = ZERO +- BETA( J ) = B( J, J ) ++ BETA( J ) = T( J, J ) + 410 CONTINUE + * + * Normal Termination +diff -uNr LAPACK.orig/SRC/slasr.f LAPACK/SRC/slasr.f +--- LAPACK.orig/SRC/slasr.f Thu Nov 4 14:23:40 1999 ++++ LAPACK/SRC/slasr.f Fri 
May 25 16:12:26 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK auxiliary routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1992 ++* May 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER DIRECT, PIVOT, SIDE +@@ -16,44 +16,77 @@ + * Purpose + * ======= + * +-* SLASR performs the transformation +-* +-* A := P*A, when SIDE = 'L' or 'l' ( Left-hand side ) +-* +-* A := A*P', when SIDE = 'R' or 'r' ( Right-hand side ) +-* +-* where A is an m by n real matrix and P is an orthogonal matrix, +-* consisting of a sequence of plane rotations determined by the +-* parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l' +-* and z = n when SIDE = 'R' or 'r' ): +-* +-* When DIRECT = 'F' or 'f' ( Forward sequence ) then +-* +-* P = P( z - 1 )*...*P( 2 )*P( 1 ), +-* +-* and when DIRECT = 'B' or 'b' ( Backward sequence ) then +-* +-* P = P( 1 )*P( 2 )*...*P( z - 1 ), +-* +-* where P( k ) is a plane rotation matrix for the following planes: +-* +-* when PIVOT = 'V' or 'v' ( Variable pivot ), +-* the plane ( k, k + 1 ) +-* +-* when PIVOT = 'T' or 't' ( Top pivot ), +-* the plane ( 1, k + 1 ) +-* +-* when PIVOT = 'B' or 'b' ( Bottom pivot ), +-* the plane ( k, z ) +-* +-* c( k ) and s( k ) must contain the cosine and sine that define the +-* matrix P( k ). The two by two plane rotation part of the matrix +-* P( k ), R( k ), is assumed to be of the form +-* +-* R( k ) = ( c( k ) s( k ) ). +-* ( -s( k ) c( k ) ) +-* +-* This version vectorises across rows of the array A when SIDE = 'L'. ++* SLASR applies a sequence of plane rotations to a real matrix A, ++* from either the left or the right. 
++* ++* When SIDE = 'L', the transformation takes the form ++* ++* A := P*A ++* ++* and when SIDE = 'R', the transformation takes the form ++* ++* A := A*P**T ++* ++* where P is an orthogonal matrix consisting of a sequence of z plane ++* rotations, with z = M when SIDE = 'L' and z = N when SIDE = 'R', ++* and P**T is the transpose of P. ++* ++* When DIRECT = 'F' (Forward sequence), then ++* ++* P = P(z-1) * ... * P(2) * P(1) ++* ++* and when DIRECT = 'B' (Backward sequence), then ++* ++* P = P(1) * P(2) * ... * P(z-1) ++* ++* where P(k) is a plane rotation matrix defined by the 2-by-2 rotation ++* ++* R(k) = ( c(k) s(k) ) ++* = ( -s(k) c(k) ). ++* ++* When PIVOT = 'V' (Variable pivot), the rotation is performed ++* for the plane (k,k+1), i.e., P(k) has the form ++* ++* P(k) = ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( c(k) s(k) ) ++* ( -s(k) c(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ++* where R(k) appears as a rank-2 modification to the identity matrix in ++* rows and columns k and k+1. ++* ++* When PIVOT = 'T' (Top pivot), the rotation is performed for the ++* plane (1,k+1), so P(k) has the form ++* ++* P(k) = ( c(k) s(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( -s(k) c(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ++* where R(k) appears in rows and columns 1 and k+1. ++* ++* Similarly, when PIVOT = 'B' (Bottom pivot), the rotation is ++* performed for the plane (k,z), giving P(k) the form ++* ++* P(k) = ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( c(k) s(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( -s(k) c(k) ) ++* ++* where R(k) appears in rows and columns k and z. The rotations are ++* performed without ever forming P(k) explicitly. + * + * Arguments + * ========= +@@ -62,13 +95,13 @@ + * Specifies whether the plane rotation matrix P is applied to + * A on the left or the right. 
+ * = 'L': Left, compute A := P*A +-* = 'R': Right, compute A:= A*P' ++* = 'R': Right, compute A:= A*P**T + * + * DIRECT (input) CHARACTER*1 + * Specifies whether P is a forward or backward sequence of + * plane rotations. +-* = 'F': Forward, P = P( z - 1 )*...*P( 2 )*P( 1 ) +-* = 'B': Backward, P = P( 1 )*P( 2 )*...*P( z - 1 ) ++* = 'F': Forward, P = P(z-1)*...*P(2)*P(1) ++* = 'B': Backward, P = P(1)*P(2)*...*P(z-1) + * + * PIVOT (input) CHARACTER*1 + * Specifies the plane for which P(k) is a plane rotation +@@ -85,18 +118,22 @@ + * The number of columns of the matrix A. If n <= 1, an + * immediate return is effected. + * +-* C, S (input) REAL arrays, dimension ++* C (input) REAL array, dimension ++* (M-1) if SIDE = 'L' ++* (N-1) if SIDE = 'R' ++* The cosines c(k) of the plane rotations. ++* ++* S (input) REAL array, dimension + * (M-1) if SIDE = 'L' + * (N-1) if SIDE = 'R' +-* c(k) and s(k) contain the cosine and sine that define the +-* matrix P(k). The two by two plane rotation part of the +-* matrix P(k), R(k), is assumed to be of the form +-* R( k ) = ( c( k ) s( k ) ). +-* ( -s( k ) c( k ) ) ++* The sines s(k) of the plane rotations. The 2-by-2 plane ++* rotation part of the matrix P(k), R(k), has the form ++* R(k) = ( c(k) s(k) ) ++* ( -s(k) c(k) ). + * + * A (input/output) REAL array, dimension (LDA,N) +-* The m by n matrix A. On exit, A is overwritten by P*A if +-* SIDE = 'R' or by A*P' if SIDE = 'L'. ++* The M-by-N matrix A. On exit, A is overwritten by P*A if ++* SIDE = 'R' or by A*P**T if SIDE = 'L'. + * + * LDA (input) INTEGER + * The leading dimension of the array A. LDA >= max(1,M). +diff -uNr LAPACK.orig/SRC/ssbgst.f LAPACK/SRC/ssbgst.f +--- LAPACK.orig/SRC/ssbgst.f Thu Nov 4 14:23:32 1999 ++++ LAPACK/SRC/ssbgst.f Fri May 25 16:12:46 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* January 9, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO, VECT +@@ -125,7 +125,7 @@ + INFO = -3 + ELSE IF( KA.LT.0 ) THEN + INFO = -4 +- ELSE IF( KB.LT.0 ) THEN ++ ELSE IF( KB.LT.0 .OR. KB.GT.KA ) THEN + INFO = -5 + ELSE IF( LDAB.LT.KA+1 ) THEN + INFO = -7 +diff -uNr LAPACK.orig/SRC/sstebz.f LAPACK/SRC/sstebz.f +--- LAPACK.orig/SRC/sstebz.f Thu Nov 4 14:24:00 1999 ++++ LAPACK/SRC/sstebz.f Fri May 25 16:13:18 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-18-00: Increase FUDGE factor for T3E (eca) + * + * .. Scalar Arguments .. + CHARACTER ORDER, RANGE +@@ -175,7 +176,7 @@ + PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0, TWO = 2.0E0, + $ HALF = 1.0E0 / TWO ) + REAL FUDGE, RELFAC +- PARAMETER ( FUDGE = 2.0E0, RELFAC = 2.0E0 ) ++ PARAMETER ( FUDGE = 2.1E0, RELFAC = 2.0E0 ) + * .. + * .. Local Scalars .. + LOGICAL NCNVRG, TOOFEW +diff -uNr LAPACK.orig/SRC/stgevc.f LAPACK/SRC/stgevc.f +--- LAPACK.orig/SRC/stgevc.f Thu Nov 4 14:26:09 1999 ++++ LAPACK/SRC/stgevc.f Fri May 25 16:13:28 2001 +@@ -1,18 +1,18 @@ +- SUBROUTINE STGEVC( SIDE, HOWMNY, SELECT, N, A, LDA, B, LDB, VL, ++ SUBROUTINE STGEVC( SIDE, HOWMNY, SELECT, N, S, LDS, P, LDP, VL, + $ LDVL, VR, LDVR, MM, M, WORK, INFO ) + * + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 4, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +- INTEGER INFO, LDA, LDB, LDVL, LDVR, M, MM, N ++ INTEGER INFO, LDP, LDS, LDVL, LDVR, M, MM, N + * .. + * .. Array Arguments .. + LOGICAL SELECT( * ) +- REAL A( LDA, * ), B( LDB, * ), VL( LDVL, * ), ++ REAL P( LDP, * ), S( LDS, * ), VL( LDVL, * ), + $ VR( LDVR, * ), WORK( * ) + * .. 
+ * +@@ -20,34 +20,30 @@ + * Purpose + * ======= + * +-* STGEVC computes some or all of the right and/or left generalized +-* eigenvectors of a pair of real upper triangular matrices (A,B). +-* +-* The right generalized eigenvector x and the left generalized +-* eigenvector y of (A,B) corresponding to a generalized eigenvalue +-* w are defined by: +-* +-* (A - wB) * x = 0 and y**H * (A - wB) = 0 +-* ++* STGEVC computes some or all of the right and/or left eigenvectors of ++* a pair of real matrices (S,P), where S is a quasi-triangular matrix ++* and P is upper triangular. Matrix pairs of this type are produced by ++* the generalized Schur factorization of a matrix pair (A,B): ++* ++* A = Q*S*Z**T, B = Q*P*Z**T ++* ++* as computed by SGGHRD + SHGEQZ. ++* ++* The right eigenvector x and the left eigenvector y of (S,P) ++* corresponding to an eigenvalue w are defined by: ++* ++* S*x = w*P*x, (y**H)*S = w*(y**H)*P, ++* + * where y**H denotes the conjugate tranpose of y. +-* +-* If an eigenvalue w is determined by zero diagonal elements of both A +-* and B, a unit vector is returned as the corresponding eigenvector. +-* +-* If all eigenvectors are requested, the routine may either return +-* the matrices X and/or Y of right or left eigenvectors of (A,B), or +-* the products Z*X and/or Q*Y, where Z and Q are input orthogonal +-* matrices. If (A,B) was obtained from the generalized real-Schur +-* factorization of an original pair of matrices +-* (A0,B0) = (Q*A*Z**H,Q*B*Z**H), +-* then Z*X and Q*Y are the matrices of right or left eigenvectors of +-* A. +-* +-* A must be block upper triangular, with 1-by-1 and 2-by-2 diagonal +-* blocks. Corresponding to each 2-by-2 diagonal block is a complex +-* conjugate pair of eigenvalues and eigenvectors; only one +-* eigenvector of the pair is computed, namely the one corresponding +-* to the eigenvalue with positive imaginary part. 
++* The eigenvalues are not input to this routine, but are computed ++* directly from the diagonal blocks of S and P. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of (S,P), or the products Z*X and/or Q*Y, ++* where Z and Q are input matrices. ++* If Q and Z are the orthogonal factors from the generalized Schur ++* factorization of a matrix pair (A,B), then Z*X and Q*Y ++* are the matrices of right and left eigenvectors of (A,B). + * + * Arguments + * ========= +@@ -59,78 +55,84 @@ + * + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; +-* = 'B': compute all right and/or left eigenvectors, and +-* backtransform them using the input matrices supplied +-* in VR and/or VL; ++* = 'B': compute all right and/or left eigenvectors, ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, + * specified by the logical array SELECT. + * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY='S', SELECT specifies the eigenvectors to be +-* computed. +-* If HOWMNY='A' or 'B', SELECT is not referenced. +-* To select the real eigenvector corresponding to the real +-* eigenvalue w(j), SELECT(j) must be set to .TRUE. To select +-* the complex eigenvector corresponding to a complex conjugate +-* pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must +-* be set to .TRUE.. ++* computed. If w(j) is a real eigenvalue, the corresponding ++* real eigenvector is computed if SELECT(j) is .TRUE.. ++* If w(j) and w(j+1) are the real and imaginary parts of a ++* complex eigenvalue, the corresponding complex eigenvector ++* is computed if either SELECT(j) or SELECT(j+1) is .TRUE., ++* and on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is ++* set to .FALSE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER +-* The order of the matrices A and B. N >= 0. ++* The order of the matrices S and P. N >= 0. 
+ * +-* A (input) REAL array, dimension (LDA,N) +-* The upper quasi-triangular matrix A. ++* S (input) REAL array, dimension (LDS,N) ++* The upper quasi-triangular matrix S from a generalized Schur ++* factorization, as computed by SHGEQZ. ++* ++* LDS (input) INTEGER ++* The leading dimension of array S. LDS >= max(1,N). ++* ++* P (input) REAL array, dimension (LDP,N) ++* The upper triangular matrix P from a generalized Schur ++* factorization, as computed by SHGEQZ. ++* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks ++* of S must be in positive diagonal form. + * +-* LDA (input) INTEGER +-* The leading dimension of array A. LDA >= max(1, N). +-* +-* B (input) REAL array, dimension (LDB,N) +-* The upper triangular matrix B. If A has a 2-by-2 diagonal +-* block, then the corresponding 2-by-2 block of B must be +-* diagonal with positive elements. +-* +-* LDB (input) INTEGER +-* The leading dimension of array B. LDB >= max(1,N). ++* LDP (input) INTEGER ++* The leading dimension of array P. LDP >= max(1,N). + * + * VL (input/output) REAL array, dimension (LDVL,MM) + * On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must + * contain an N-by-N matrix Q (usually the orthogonal matrix Q + * of left Schur vectors returned by SHGEQZ). + * On exit, if SIDE = 'L' or 'B', VL contains: +-* if HOWMNY = 'A', the matrix Y of left eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Q*Y; +-* if HOWMNY = 'S', the left eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the left eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VL, in the same order as their eigenvalues. +-* If SIDE = 'R', VL is not referenced. + * + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part, and the second the imaginary part. + * ++* Not referenced if SIDE = 'R'. 
++* + * LDVL (input) INTEGER +-* The leading dimension of array VL. +-* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. + * + * VR (input/output) REAL array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +-* contain an N-by-N matrix Q (usually the orthogonal matrix Z ++* contain an N-by-N matrix Z (usually the orthogonal matrix Z + * of right Schur vectors returned by SHGEQZ). ++* + * On exit, if SIDE = 'R' or 'B', VR contains: +-* if HOWMNY = 'A', the matrix X of right eigenvectors of (A,B); +-* if HOWMNY = 'B', the matrix Z*X; +-* if HOWMNY = 'S', the right eigenvectors of (A,B) specified by +-* SELECT, stored consecutively in the columns of +-* VR, in the same order as their eigenvalues. +-* If SIDE = 'L', VR is not referenced. ++* if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); ++* if HOWMNY = 'B' or 'b', the matrix Z*X; ++* if HOWMNY = 'S' or 's', the right eigenvectors of (S,P) ++* specified by SELECT, stored consecutively in the ++* columns of VR, in the same order as their ++* eigenvalues. + * + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part and the second the imaginary part. ++* ++* Not referenced if SIDE = 'L'. + * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. +-* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +@@ -199,7 +201,7 @@ + * partial sums. 
Since FORTRAN arrays are stored columnwise, this has + * the advantage that at each step, the elements of C that are accessed + * are adjacent to one another, whereas with the rowwise method, the +-* elements accessed at a step are spaced LDA (and LDB) words apart. ++* elements accessed at a step are spaced LDS (and LDP) words apart. + * + * When finding left eigenvectors, the matrix in question is the + * transpose of the one in storage, so the rowwise method then +@@ -226,8 +228,8 @@ + $ XSCALE + * .. + * .. Local Arrays .. +- REAL BDIAG( 2 ), SUM( 2, 2 ), SUMA( 2, 2 ), +- $ SUMB( 2, 2 ) ++ REAL BDIAG( 2 ), SUM( 2, 2 ), SUMS( 2, 2 ), ++ $ SUMP( 2, 2 ) + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -252,7 +254,7 @@ + IHWMNY = 2 + ILALL = .FALSE. + ILBACK = .FALSE. +- ELSE IF( LSAME( HOWMNY, 'B' ) .OR. LSAME( HOWMNY, 'T' ) ) THEN ++ ELSE IF( LSAME( HOWMNY, 'B' ) ) THEN + IHWMNY = 3 + ILALL = .TRUE. + ILBACK = .TRUE. +@@ -284,9 +286,9 @@ + INFO = -2 + ELSE IF( N.LT.0 ) THEN + INFO = -4 +- ELSE IF( LDA.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDS.LT.MAX( 1, N ) ) THEN + INFO = -6 +- ELSE IF( LDB.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDP.LT.MAX( 1, N ) ) THEN + INFO = -8 + END IF + IF( INFO.NE.0 ) THEN +@@ -305,7 +307,7 @@ + GO TO 10 + END IF + IF( J.LT.N ) THEN +- IF( A( J+1, J ).NE.ZERO ) ++ IF( S( J+1, J ).NE.ZERO ) + $ ILCPLX = .TRUE. + END IF + IF( ILCPLX ) THEN +@@ -325,11 +327,11 @@ + ILABAD = .FALSE. + ILBBAD = .FALSE. + DO 20 J = 1, N - 1 +- IF( A( J+1, J ).NE.ZERO ) THEN +- IF( B( J, J ).EQ.ZERO .OR. B( J+1, J+1 ).EQ.ZERO .OR. +- $ B( J, J+1 ).NE.ZERO )ILBBAD = .TRUE. ++ IF( S( J+1, J ).NE.ZERO ) THEN ++ IF( P( J, J ).EQ.ZERO .OR. P( J+1, J+1 ).EQ.ZERO .OR. ++ $ P( J, J+1 ).NE.ZERO )ILBBAD = .TRUE. + IF( J.LT.N-1 ) THEN +- IF( A( J+2, J+1 ).NE.ZERO ) ++ IF( S( J+2, J+1 ).NE.ZERO ) + $ ILABAD = .TRUE. + END IF + END IF +@@ -372,30 +374,30 @@ + * blocks) of A and B to check for possible overflow in the + * triangular solver. 
+ * +- ANORM = ABS( A( 1, 1 ) ) ++ ANORM = ABS( S( 1, 1 ) ) + IF( N.GT.1 ) +- $ ANORM = ANORM + ABS( A( 2, 1 ) ) +- BNORM = ABS( B( 1, 1 ) ) ++ $ ANORM = ANORM + ABS( S( 2, 1 ) ) ++ BNORM = ABS( P( 1, 1 ) ) + WORK( 1 ) = ZERO + WORK( N+1 ) = ZERO + * + DO 50 J = 2, N + TEMP = ZERO + TEMP2 = ZERO +- IF( A( J, J-1 ).EQ.ZERO ) THEN ++ IF( S( J, J-1 ).EQ.ZERO ) THEN + IEND = J - 1 + ELSE + IEND = J - 2 + END IF + DO 30 I = 1, IEND +- TEMP = TEMP + ABS( A( I, J ) ) +- TEMP2 = TEMP2 + ABS( B( I, J ) ) ++ TEMP = TEMP + ABS( S( I, J ) ) ++ TEMP2 = TEMP2 + ABS( P( I, J ) ) + 30 CONTINUE + WORK( J ) = TEMP + WORK( N+J ) = TEMP2 + DO 40 I = IEND + 1, MIN( J+1, N ) +- TEMP = TEMP + ABS( A( I, J ) ) +- TEMP2 = TEMP2 + ABS( B( I, J ) ) ++ TEMP = TEMP + ABS( S( I, J ) ) ++ TEMP2 = TEMP2 + ABS( P( I, J ) ) + 40 CONTINUE + ANORM = MAX( ANORM, TEMP ) + BNORM = MAX( BNORM, TEMP2 ) +@@ -425,7 +427,7 @@ + END IF + NW = 1 + IF( JE.LT.N ) THEN +- IF( A( JE+1, JE ).NE.ZERO ) THEN ++ IF( S( JE+1, JE ).NE.ZERO ) THEN + ILCPLX = .TRUE. + NW = 2 + END IF +@@ -444,8 +446,8 @@ + * (c) complex eigenvalue. + * + IF( .NOT.ILCPLX ) THEN +- IF( ABS( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( B( JE, JE ) ).LE.SAFMIN ) THEN ++ IF( ABS( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( P( JE, JE ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- return unit eigenvector + * +@@ -472,10 +474,10 @@ + * + * Real eigenvalue + * +- TEMP = ONE / MAX( ABS( A( JE, JE ) )*ASCALE, +- $ ABS( B( JE, JE ) )*BSCALE, SAFMIN ) +- SALFAR = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*B( JE, JE ) )*BSCALE ++ TEMP = ONE / MAX( ABS( S( JE, JE ) )*ASCALE, ++ $ ABS( P( JE, JE ) )*BSCALE, SAFMIN ) ++ SALFAR = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*P( JE, JE ) )*BSCALE + ACOEF = SBETA*ASCALE + BCOEFR = SALFAR*BSCALE + BCOEFI = ZERO +@@ -517,7 +519,7 @@ + * + * Complex eigenvalue + * +- CALL SLAG2( A( JE, JE ), LDA, B( JE, JE ), LDB, ++ CALL SLAG2( S( JE, JE ), LDS, P( JE, JE ), LDP, + $ SAFMIN*SAFETY, ACOEF, TEMP, BCOEFR, TEMP2, + $ BCOEFI ) + BCOEFI = -BCOEFI +@@ -549,9 +551,9 @@ + * + * Compute first two components of eigenvector + * +- TEMP = ACOEF*A( JE+1, JE ) +- TEMP2R = ACOEF*A( JE, JE ) - BCOEFR*B( JE, JE ) +- TEMP2I = -BCOEFI*B( JE, JE ) ++ TEMP = ACOEF*S( JE+1, JE ) ++ TEMP2R = ACOEF*S( JE, JE ) - BCOEFR*P( JE, JE ) ++ TEMP2I = -BCOEFI*P( JE, JE ) + IF( ABS( TEMP ).GT.ABS( TEMP2R )+ABS( TEMP2I ) ) THEN + WORK( 2*N+JE ) = ONE + WORK( 3*N+JE ) = ZERO +@@ -560,10 +562,10 @@ + ELSE + WORK( 2*N+JE+1 ) = ONE + WORK( 3*N+JE+1 ) = ZERO +- TEMP = ACOEF*A( JE, JE+1 ) +- WORK( 2*N+JE ) = ( BCOEFR*B( JE+1, JE+1 )-ACOEF* +- $ A( JE+1, JE+1 ) ) / TEMP +- WORK( 3*N+JE ) = BCOEFI*B( JE+1, JE+1 ) / TEMP ++ TEMP = ACOEF*S( JE, JE+1 ) ++ WORK( 2*N+JE ) = ( BCOEFR*P( JE+1, JE+1 )-ACOEF* ++ $ S( JE+1, JE+1 ) ) / TEMP ++ WORK( 3*N+JE ) = BCOEFI*P( JE+1, JE+1 ) / TEMP + END IF + XMAX = MAX( ABS( WORK( 2*N+JE ) )+ABS( WORK( 3*N+JE ) ), + $ ABS( WORK( 2*N+JE+1 ) )+ABS( WORK( 3*N+JE+1 ) ) ) +@@ -586,11 +588,11 @@ + END IF + * + NA = 1 +- BDIAG( 1 ) = B( J, J ) ++ BDIAG( 1 ) = P( J, J ) + IF( J.LT.N ) THEN +- IF( A( J+1, J ).NE.ZERO ) THEN ++ IF( S( J+1, J ).NE.ZERO ) THEN + IL2BY2 = .TRUE. 
+- BDIAG( 2 ) = B( J+1, J+1 ) ++ BDIAG( 2 ) = P( J+1, J+1 ) + NA = 2 + END IF + END IF +@@ -616,13 +618,13 @@ + * Compute dot products + * + * j-1 +-* SUM = sum conjg( a*A(k,j) - b*B(k,j) )*x(k) ++* SUM = sum conjg( a*S(k,j) - b*P(k,j) )*x(k) + * k=je + * + * To reduce the op count, this is done as + * + * _ j-1 _ j-1 +-* a*conjg( sum A(k,j)*x(k) ) - b*conjg( sum B(k,j)*x(k) ) ++* a*conjg( sum S(k,j)*x(k) ) - b*conjg( sum P(k,j)*x(k) ) + * k=je k=je + * + * which may cause underflow problems if A or B are close +@@ -659,15 +661,15 @@ + *$PL$ CMCHAR='*' + * + DO 110 JA = 1, NA +- SUMA( JA, JW ) = ZERO +- SUMB( JA, JW ) = ZERO ++ SUMS( JA, JW ) = ZERO ++ SUMP( JA, JW ) = ZERO + * + DO 100 JR = JE, J - 1 +- SUMA( JA, JW ) = SUMA( JA, JW ) + +- $ A( JR, J+JA-1 )* ++ SUMS( JA, JW ) = SUMS( JA, JW ) + ++ $ S( JR, J+JA-1 )* + $ WORK( ( JW+1 )*N+JR ) +- SUMB( JA, JW ) = SUMB( JA, JW ) + +- $ B( JR, J+JA-1 )* ++ SUMP( JA, JW ) = SUMP( JA, JW ) + ++ $ P( JR, J+JA-1 )* + $ WORK( ( JW+1 )*N+JR ) + 100 CONTINUE + 110 CONTINUE +@@ -687,15 +689,15 @@ + * + DO 130 JA = 1, NA + IF( ILCPLX ) THEN +- SUM( JA, 1 ) = -ACOEF*SUMA( JA, 1 ) + +- $ BCOEFR*SUMB( JA, 1 ) - +- $ BCOEFI*SUMB( JA, 2 ) +- SUM( JA, 2 ) = -ACOEF*SUMA( JA, 2 ) + +- $ BCOEFR*SUMB( JA, 2 ) + +- $ BCOEFI*SUMB( JA, 1 ) ++ SUM( JA, 1 ) = -ACOEF*SUMS( JA, 1 ) + ++ $ BCOEFR*SUMP( JA, 1 ) - ++ $ BCOEFI*SUMP( JA, 2 ) ++ SUM( JA, 2 ) = -ACOEF*SUMS( JA, 2 ) + ++ $ BCOEFR*SUMP( JA, 2 ) + ++ $ BCOEFI*SUMP( JA, 1 ) + ELSE +- SUM( JA, 1 ) = -ACOEF*SUMA( JA, 1 ) + +- $ BCOEFR*SUMB( JA, 1 ) ++ SUM( JA, 1 ) = -ACOEF*SUMS( JA, 1 ) + ++ $ BCOEFR*SUMP( JA, 1 ) + END IF + 130 CONTINUE + * +@@ -703,7 +705,7 @@ + * Solve ( a A - b B ) y = SUM(,) + * with scaling and perturbation of the denominator + * +- CALL SLALN2( .TRUE., NA, NW, DMIN, ACOEF, A( J, J ), LDA, ++ CALL SLALN2( .TRUE., NA, NW, DMIN, ACOEF, S( J, J ), LDS, + $ BDIAG( 1 ), BDIAG( 2 ), SUM, 2, BCOEFR, + $ BCOEFI, WORK( 2*N+J ), N, SCALE, TEMP, + $ IINFO ) +@@ -790,7 +792,7 @@ 
+ END IF + NW = 1 + IF( JE.GT.1 ) THEN +- IF( A( JE, JE-1 ).NE.ZERO ) THEN ++ IF( S( JE, JE-1 ).NE.ZERO ) THEN + ILCPLX = .TRUE. + NW = 2 + END IF +@@ -809,8 +811,8 @@ + * (c) complex eigenvalue. + * + IF( .NOT.ILCPLX ) THEN +- IF( ABS( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( B( JE, JE ) ).LE.SAFMIN ) THEN ++ IF( ABS( S( JE, JE ) ).LE.SAFMIN .AND. ++ $ ABS( P( JE, JE ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- unit eigenvector + * +@@ -839,10 +841,10 @@ + * + * Real eigenvalue + * +- TEMP = ONE / MAX( ABS( A( JE, JE ) )*ASCALE, +- $ ABS( B( JE, JE ) )*BSCALE, SAFMIN ) +- SALFAR = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*B( JE, JE ) )*BSCALE ++ TEMP = ONE / MAX( ABS( S( JE, JE ) )*ASCALE, ++ $ ABS( P( JE, JE ) )*BSCALE, SAFMIN ) ++ SALFAR = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*P( JE, JE ) )*BSCALE + ACOEF = SBETA*ASCALE + BCOEFR = SALFAR*BSCALE + BCOEFI = ZERO +@@ -885,14 +887,14 @@ + * (See "Further Details", above.) + * + DO 260 JR = 1, JE - 1 +- WORK( 2*N+JR ) = BCOEFR*B( JR, JE ) - +- $ ACOEF*A( JR, JE ) ++ WORK( 2*N+JR ) = BCOEFR*P( JR, JE ) - ++ $ ACOEF*S( JR, JE ) + 260 CONTINUE + ELSE + * + * Complex eigenvalue + * +- CALL SLAG2( A( JE-1, JE-1 ), LDA, B( JE-1, JE-1 ), LDB, ++ CALL SLAG2( S( JE-1, JE-1 ), LDS, P( JE-1, JE-1 ), LDP, + $ SAFMIN*SAFETY, ACOEF, TEMP, BCOEFR, TEMP2, + $ BCOEFI ) + IF( BCOEFI.EQ.ZERO ) THEN +@@ -924,9 +926,9 @@ + * Compute first two components of eigenvector + * and contribution to sums + * +- TEMP = ACOEF*A( JE, JE-1 ) +- TEMP2R = ACOEF*A( JE, JE ) - BCOEFR*B( JE, JE ) +- TEMP2I = -BCOEFI*B( JE, JE ) ++ TEMP = ACOEF*S( JE, JE-1 ) ++ TEMP2R = ACOEF*S( JE, JE ) - BCOEFR*P( JE, JE ) ++ TEMP2I = -BCOEFI*P( JE, JE ) + IF( ABS( TEMP ).GE.ABS( TEMP2R )+ABS( TEMP2I ) ) THEN + WORK( 2*N+JE ) = ONE + WORK( 3*N+JE ) = ZERO +@@ -935,10 +937,10 @@ + ELSE + WORK( 2*N+JE-1 ) = ONE + WORK( 3*N+JE-1 ) = ZERO +- TEMP = ACOEF*A( JE-1, JE ) +- WORK( 2*N+JE ) = ( BCOEFR*B( JE-1, JE-1 )-ACOEF* +- $ A( JE-1, JE-1 ) ) / TEMP +- 
WORK( 3*N+JE ) = BCOEFI*B( JE-1, JE-1 ) / TEMP ++ TEMP = ACOEF*S( JE-1, JE ) ++ WORK( 2*N+JE ) = ( BCOEFR*P( JE-1, JE-1 )-ACOEF* ++ $ S( JE-1, JE-1 ) ) / TEMP ++ WORK( 3*N+JE ) = BCOEFI*P( JE-1, JE-1 ) / TEMP + END IF + * + XMAX = MAX( ABS( WORK( 2*N+JE ) )+ABS( WORK( 3*N+JE ) ), +@@ -958,12 +960,12 @@ + CRE2B = BCOEFR*WORK( 2*N+JE ) - BCOEFI*WORK( 3*N+JE ) + CIM2B = BCOEFI*WORK( 2*N+JE ) + BCOEFR*WORK( 3*N+JE ) + DO 270 JR = 1, JE - 2 +- WORK( 2*N+JR ) = -CREALA*A( JR, JE-1 ) + +- $ CREALB*B( JR, JE-1 ) - +- $ CRE2A*A( JR, JE ) + CRE2B*B( JR, JE ) +- WORK( 3*N+JR ) = -CIMAGA*A( JR, JE-1 ) + +- $ CIMAGB*B( JR, JE-1 ) - +- $ CIM2A*A( JR, JE ) + CIM2B*B( JR, JE ) ++ WORK( 2*N+JR ) = -CREALA*S( JR, JE-1 ) + ++ $ CREALB*P( JR, JE-1 ) - ++ $ CRE2A*S( JR, JE ) + CRE2B*P( JR, JE ) ++ WORK( 3*N+JR ) = -CIMAGA*S( JR, JE-1 ) + ++ $ CIMAGB*P( JR, JE-1 ) - ++ $ CIM2A*S( JR, JE ) + CIM2B*P( JR, JE ) + 270 CONTINUE + END IF + * +@@ -978,23 +980,23 @@ + * next iteration to process it (when it will be j:j+1) + * + IF( .NOT.IL2BY2 .AND. J.GT.1 ) THEN +- IF( A( J, J-1 ).NE.ZERO ) THEN ++ IF( S( J, J-1 ).NE.ZERO ) THEN + IL2BY2 = .TRUE. 
+ GO TO 370 + END IF + END IF +- BDIAG( 1 ) = B( J, J ) ++ BDIAG( 1 ) = P( J, J ) + IF( IL2BY2 ) THEN + NA = 2 +- BDIAG( 2 ) = B( J+1, J+1 ) ++ BDIAG( 2 ) = P( J+1, J+1 ) + ELSE + NA = 1 + END IF + * + * Compute x(j) (and x(j+1), if 2-by-2 block) + * +- CALL SLALN2( .FALSE., NA, NW, DMIN, ACOEF, A( J, J ), +- $ LDA, BDIAG( 1 ), BDIAG( 2 ), WORK( 2*N+J ), ++ CALL SLALN2( .FALSE., NA, NW, DMIN, ACOEF, S( J, J ), ++ $ LDS, BDIAG( 1 ), BDIAG( 2 ), WORK( 2*N+J ), + $ N, BCOEFR, BCOEFI, SUM, 2, SCALE, TEMP, + $ IINFO ) + IF( SCALE.LT.ONE ) THEN +@@ -1014,7 +1016,7 @@ + 300 CONTINUE + 310 CONTINUE + * +-* w = w + x(j)*(a A(*,j) - b B(*,j) ) with scaling ++* w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling + * + IF( J.GT.1 ) THEN + * +@@ -1052,19 +1054,19 @@ + $ BCOEFR*WORK( 3*N+J+JA-1 ) + DO 340 JR = 1, J - 1 + WORK( 2*N+JR ) = WORK( 2*N+JR ) - +- $ CREALA*A( JR, J+JA-1 ) + +- $ CREALB*B( JR, J+JA-1 ) ++ $ CREALA*S( JR, J+JA-1 ) + ++ $ CREALB*P( JR, J+JA-1 ) + WORK( 3*N+JR ) = WORK( 3*N+JR ) - +- $ CIMAGA*A( JR, J+JA-1 ) + +- $ CIMAGB*B( JR, J+JA-1 ) ++ $ CIMAGA*S( JR, J+JA-1 ) + ++ $ CIMAGB*P( JR, J+JA-1 ) + 340 CONTINUE + ELSE + CREALA = ACOEF*WORK( 2*N+J+JA-1 ) + CREALB = BCOEFR*WORK( 2*N+J+JA-1 ) + DO 350 JR = 1, J - 1 + WORK( 2*N+JR ) = WORK( 2*N+JR ) - +- $ CREALA*A( JR, J+JA-1 ) + +- $ CREALB*B( JR, J+JA-1 ) ++ $ CREALA*S( JR, J+JA-1 ) + ++ $ CREALB*P( JR, J+JA-1 ) + 350 CONTINUE + END IF + 360 CONTINUE +diff -uNr LAPACK.orig/SRC/strevc.f LAPACK/SRC/strevc.f +--- LAPACK.orig/SRC/strevc.f Thu Nov 4 14:24:06 1999 ++++ LAPACK/SRC/strevc.f Fri May 25 16:13:46 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 7, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER HOWMNY, SIDE +@@ -21,28 +21,23 @@ + * + * STREVC computes some or all of the right and/or left eigenvectors of + * a real upper quasi-triangular matrix T. +-* ++* Matrices of this type are produced by the Schur factorization of ++* a real general matrix: A = Q*T*Q**T, as computed by SHSEQR. ++* + * The right eigenvector x and the left eigenvector y of T corresponding + * to an eigenvalue w are defined by: +-* +-* T*x = w*x, y'*T = w*y' +-* +-* where y' denotes the conjugate transpose of the vector y. +-* +-* If all eigenvectors are requested, the routine may either return the +-* matrices X and/or Y of right or left eigenvectors of T, or the +-* products Q*X and/or Q*Y, where Q is an input orthogonal +-* matrix. If T was obtained from the real-Schur factorization of an +-* original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of +-* right or left eigenvectors of A. +-* +-* T must be in Schur canonical form (as returned by SHSEQR), that is, +-* block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each +-* 2-by-2 diagonal block has its diagonal elements equal and its +-* off-diagonal elements of opposite sign. Corresponding to each 2-by-2 +-* diagonal block is a complex conjugate pair of eigenvalues and +-* eigenvectors; only one eigenvector of the pair is computed, namely +-* the one corresponding to the eigenvalue with positive imaginary part. ++* ++* T*x = w*x, (y**H)*T = w*(y**H) ++* ++* where y**H denotes the conjugate transpose of y. ++* The eigenvalues are not input to this routine, but are read directly ++* from the diagonal blocks of T. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an ++* input matrix. If Q is the orthogonal factor that reduces a matrix ++* A to Schur form T, then Q*X and Q*Y are the matrices of right and ++* left eigenvectors of A. 
+ * + * Arguments + * ========= +@@ -55,21 +50,21 @@ + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; + * = 'B': compute all right and/or left eigenvectors, +-* and backtransform them using the input matrices +-* supplied in VR and/or VL; ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, +-* specified by the logical array SELECT. ++* as indicated by the logical array SELECT. + * + * SELECT (input/output) LOGICAL array, dimension (N) + * If HOWMNY = 'S', SELECT specifies the eigenvectors to be + * computed. +-* If HOWMNY = 'A' or 'B', SELECT is not referenced. +-* To select the real eigenvector corresponding to a real +-* eigenvalue w(j), SELECT(j) must be set to .TRUE.. To select +-* the complex eigenvector corresponding to a complex conjugate +-* pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must be +-* set to .TRUE.; then on exit SELECT(j) is .TRUE. and +-* SELECT(j+1) is .FALSE.. ++* If w(j) is a real eigenvalue, the corresponding real ++* eigenvector is computed if SELECT(j) is .TRUE.. ++* If w(j) and w(j+1) are the real and imaginary parts of a ++* complex eigenvalue, the corresponding complex eigenvector is ++* computed if either SELECT(j) or SELECT(j+1) is .TRUE., and ++* on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is set to ++* .FALSE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER + * The order of the matrix T. N >= 0. +@@ -86,15 +81,6 @@ + * of Schur vectors returned by SHSEQR). + * On exit, if SIDE = 'L' or 'B', VL contains: + * if HOWMNY = 'A', the matrix Y of left eigenvectors of T; +-* VL has the same quasi-lower triangular form +-* as T'. If T(i,i) is a real eigenvalue, then +-* the i-th column VL(i) of VL is its +-* corresponding eigenvector. 
If T(i:i+1,i:i+1) +-* is a 2-by-2 block whose eigenvalues are +-* complex-conjugate eigenvalues of T, then +-* VL(i)+sqrt(-1)*VL(i+1) is the complex +-* eigenvector corresponding to the eigenvalue +-* with positive real part. + * if HOWMNY = 'B', the matrix Q*Y; + * if HOWMNY = 'S', the left eigenvectors of T specified by + * SELECT, stored consecutively in the columns +@@ -103,11 +89,11 @@ + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part, and the second the imaginary part. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of the array VL. LDVL >= max(1,N) if +-* SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of the array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. + * + * VR (input/output) REAL array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +@@ -115,15 +101,6 @@ + * of Schur vectors returned by SHSEQR). + * On exit, if SIDE = 'R' or 'B', VR contains: + * if HOWMNY = 'A', the matrix X of right eigenvectors of T; +-* VR has the same quasi-upper triangular form +-* as T. If T(i,i) is a real eigenvalue, then +-* the i-th column VR(i) of VR is its +-* corresponding eigenvector. If T(i:i+1,i:i+1) +-* is a 2-by-2 block whose eigenvalues are +-* complex-conjugate eigenvalues of T, then +-* VR(i)+sqrt(-1)*VR(i+1) is the complex +-* eigenvector corresponding to the eigenvalue +-* with positive real part. + * if HOWMNY = 'B', the matrix Q*X; + * if HOWMNY = 'S', the right eigenvectors of T specified by + * SELECT, stored consecutively in the columns +@@ -132,11 +109,11 @@ + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part and the second the imaginary part. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. 
+ * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. LDVR >= max(1,N) if +-* SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +diff -uNr LAPACK.orig/SRC/strsen.f LAPACK/SRC/strsen.f +--- LAPACK.orig/SRC/strsen.f Thu Nov 4 14:24:06 1999 ++++ LAPACK/SRC/strsen.f Fri May 25 16:14:06 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* January 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, JOB +@@ -118,8 +118,8 @@ + * LWORK (input) INTEGER + * The dimension of the array WORK. + * If JOB = 'N', LWORK >= max(1,N); +-* if JOB = 'E', LWORK >= M*(N-M); +-* if JOB = 'V' or 'B', LWORK >= 2*M*(N-M). ++* if JOB = 'E', LWORK >= max(1,M*(N-M)); ++* if JOB = 'V' or 'B', LWORK >= max(1,2*M*(N-M)). + * + * If LWORK = -1, then a workspace query is assumed; the routine + * only calculates the optimal size of the WORK array, returns +@@ -127,12 +127,12 @@ + * message related to LWORK is issued by XERBLA. + * + * IWORK (workspace) INTEGER array, dimension (LIWORK) +-* IF JOB = 'N' or 'E', IWORK is not referenced. ++* On exit, if INFO = 0, IWORK(1) returns the optimal LIWORK. + * + * LIWORK (input) INTEGER + * The dimension of the array IWORK. + * If JOB = 'N' or 'E', LIWORK >= 1; +-* if JOB = 'V' or 'B', LIWORK >= M*(N-M). ++* if JOB = 'V' or 'B', LIWORK >= max(1,M*(N-M)). + * + * If LIWORK = -1, then a workspace query is assumed; the + * routine only calculates the optimal size of the IWORK array, +diff -uNr LAPACK.orig/SRC/zbdsqr.f LAPACK/SRC/zbdsqr.f +--- LAPACK.orig/SRC/zbdsqr.f Thu Nov 4 14:25:42 1999 ++++ LAPACK/SRC/zbdsqr.f Fri May 25 15:59:12 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. 
of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO +@@ -18,14 +18,26 @@ + * Purpose + * ======= + * +-* ZBDSQR computes the singular value decomposition (SVD) of a real +-* N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P' +-* denotes the transpose of P), where S is a diagonal matrix with +-* non-negative diagonal elements (the singular values of B), and Q +-* and P are orthogonal matrices. +-* +-* The routine computes S, and optionally computes U * Q, P' * VT, +-* or Q' * C, for given complex input matrices U, VT, and C. ++* ZBDSQR computes the singular values and, optionally, the right and/or ++* left singular vectors from the singular value decomposition (SVD) of ++* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit ++* zero-shift QR algorithm. The SVD of B has the form ++* ++* B = Q * S * P**H ++* ++* where S is the diagonal matrix of singular values, Q is an orthogonal ++* matrix of left singular vectors, and P is an orthogonal matrix of ++* right singular vectors. If left singular vectors are requested, this ++* subroutine actually returns U*Q instead of Q, and, if right singular ++* vectors are requested, this subroutine returns P**H*VT instead of ++* P**H, for given complex input matrices U and VT. When U and VT are ++* the unitary matrices that reduce a general matrix A to bidiagonal ++* form: A = U*B*VT, as computed by ZGEBRD, then ++* ++* A = (U*Q) * S * (P**H*VT) ++* ++* is the SVD of A. Optionally, the subroutine may also compute Q**H*C ++* for a given complex input matrix C. + * + * See "Computing Small Singular Values of Bidiagonal Matrices With + * Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, +@@ -61,18 +73,17 @@ + * order. 
+ * + * E (input/output) DOUBLE PRECISION array, dimension (N) +-* On entry, the elements of E contain the +-* offdiagonal elements of of the bidiagonal matrix whose SVD +-* is desired. On normal exit (INFO = 0), E is destroyed. +-* If the algorithm does not converge (INFO > 0), D and E ++* On entry, the N-1 offdiagonal elements of the bidiagonal ++* matrix B. ++* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E + * will contain the diagonal and superdiagonal elements of a + * bidiagonal matrix orthogonally equivalent to the one given + * as input. E(N) is used for workspace. + * + * VT (input/output) COMPLEX*16 array, dimension (LDVT, NCVT) + * On entry, an N-by-NCVT matrix VT. +-* On exit, VT is overwritten by P' * VT. +-* VT is not referenced if NCVT = 0. ++* On exit, VT is overwritten by P**H * VT. ++* Not referenced if NCVT = 0. + * + * LDVT (input) INTEGER + * The leading dimension of the array VT. +@@ -81,21 +92,22 @@ + * U (input/output) COMPLEX*16 array, dimension (LDU, N) + * On entry, an NRU-by-N matrix U. + * On exit, U is overwritten by U * Q. +-* U is not referenced if NRU = 0. ++* Not referenced if NRU = 0. + * + * LDU (input) INTEGER + * The leading dimension of the array U. LDU >= max(1,NRU). + * + * C (input/output) COMPLEX*16 array, dimension (LDC, NCC) + * On entry, an N-by-NCC matrix C. +-* On exit, C is overwritten by Q' * C. +-* C is not referenced if NCC = 0. ++* On exit, C is overwritten by Q**H * C. ++* Not referenced if NCC = 0. + * + * LDC (input) INTEGER + * The leading dimension of the array C. + * LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. 
+ * +-* RWORK (workspace) DOUBLE PRECISION array, dimension (4*N) ++* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) ++* if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise + * + * INFO (output) INTEGER + * = 0: successful exit +diff -uNr LAPACK.orig/SRC/zgebd2.f LAPACK/SRC/zgebd2.f +--- LAPACK.orig/SRC/zgebd2.f Thu Nov 4 14:25:01 1999 ++++ LAPACK/SRC/zgebd2.f Fri May 25 15:59:31 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* May 7, 2001 + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, M, N +@@ -172,8 +172,9 @@ + * + * Apply H(i)' to A(i:m,i+1:n) from the left + * +- CALL ZLARF( 'Left', M-I+1, N-I, A( I, I ), 1, +- $ DCONJG( TAUQ( I ) ), A( I, I+1 ), LDA, WORK ) ++ IF( I.LT.N ) ++ $ CALL ZLARF( 'Left', M-I+1, N-I, A( I, I ), 1, ++ $ DCONJG( TAUQ( I ) ), A( I, I+1 ), LDA, WORK ) + A( I, I ) = D( I ) + * + IF( I.LT.N ) THEN +@@ -215,8 +216,9 @@ + * + * Apply G(i) to A(i+1:m,i:n) from the right + * +- CALL ZLARF( 'Right', M-I, N-I+1, A( I, I ), LDA, TAUP( I ), +- $ A( MIN( I+1, M ), I ), LDA, WORK ) ++ IF( I.LT.M ) ++ $ CALL ZLARF( 'Right', M-I, N-I+1, A( I, I ), LDA, ++ $ TAUP( I ), A( MIN( I+1, M ), I ), LDA, WORK ) + CALL ZLACGV( N-I+1, A( I, I ), LDA ) + A( I, I ) = D( I ) + * +diff -uNr LAPACK.orig/SRC/zgees.f LAPACK/SRC/zgees.f +--- LAPACK.orig/SRC/zgees.f Thu Nov 4 14:25:01 1999 ++++ LAPACK/SRC/zgees.f Fri May 25 16:00:01 2001 +@@ -5,6 +5,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVS, SORT +@@ -89,10 +90,9 @@ + * The dimension of the array WORK. LWORK >= max(1,2*N). + * For good performance, LWORK must generally be larger. 
+ * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) DOUBLE PRECISION array, dimension (N) + * +@@ -120,11 +120,13 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, SCALEA, WANTST, WANTVS ++ LOGICAL SCALEA, WANTST, WANTVS + INTEGER HSWORK, I, IBAL, ICOND, IERR, IEVAL, IHI, ILO, + $ ITAU, IWRK, K, MAXB, MAXWRK, MINWRK + DOUBLE PRECISION ANRM, BIGNUM, CSCALE, EPS, S, SEP, SMLNUM +@@ -133,8 +135,8 @@ + DOUBLE PRECISION DUM( 1 ) + * .. + * .. External Subroutines .. +- EXTERNAL XERBLA, ZCOPY, ZGEBAK, ZGEBAL, ZGEHRD, ZHSEQR, +- $ ZLACPY, ZLASCL, ZTRSEN, ZUNGHR ++ EXTERNAL DLABAD, XERBLA, ZCOPY, ZGEBAK, ZGEBAL, ZGEHRD, ++ $ ZHSEQR, ZLACPY, ZLASCL, ZTRSEN, ZUNGHR + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -150,7 +152,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVS = LSAME( JOBVS, 'V' ) + WANTST = LSAME( SORT, 'S' ) + IF( ( .NOT.WANTVS ) .AND. ( .NOT.LSAME( JOBVS, 'N' ) ) ) THEN +@@ -177,7 +178,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'ZGEHRD', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 2*N ) + IF( .NOT.WANTVS ) THEN +@@ -196,19 +197,18 @@ + MAXWRK = MAX( MAXWRK, HSWORK, 1 ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. 
LWORK.NE.LQUERV ) ++ $ INFO = -12 + END IF +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -12 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGEES ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/zgeesx.f LAPACK/SRC/zgeesx.f +--- LAPACK.orig/SRC/zgeesx.f Thu Nov 4 14:25:01 1999 ++++ LAPACK/SRC/zgeesx.f Fri May 25 16:00:23 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Do WS calculations if LWORK = -1 (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVS, SENSE, SORT +@@ -119,6 +120,10 @@ + * this routine. Note that 2*SDIM*(N-SDIM) <= N*N/2. + * For good performance, LWORK must generally be larger. + * ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. ++* + * RWORK (workspace) DOUBLE PRECISION array, dimension (N) + * + * BWORK (workspace) LOGICAL array, dimension (N) +@@ -144,6 +149,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + * .. +@@ -158,8 +165,8 @@ + DOUBLE PRECISION DUM( 1 ) + * .. + * .. External Subroutines .. +- EXTERNAL DLASCL, XERBLA, ZCOPY, ZGEBAK, ZGEBAL, ZGEHRD, +- $ ZHSEQR, ZLACPY, ZLASCL, ZTRSEN, ZUNGHR ++ EXTERNAL DLABAD, DLASCL, XERBLA, ZCOPY, ZGEBAK, ZGEBAL, ++ $ ZGEHRD, ZHSEQR, ZLACPY, ZLASCL, ZTRSEN, ZUNGHR + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -211,7 +218,7 @@ + * in the code.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. 
( LWORK.GE.1 ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'ZGEHRD', ' ', N, 1, N, 0 ) + MINWRK = MAX( 1, 2*N ) + IF( .NOT.WANTVS ) THEN +@@ -229,18 +236,25 @@ + HSWORK = MAX( K*( K+2 ), 2*N ) + MAXWRK = MAX( MAXWRK, HSWORK, 1 ) + END IF ++* ++* Estimate the workspace needed by ZTRSEN. ++* ++ IF( WANTST ) THEN ++ MAXWRK = MAX( MAXWRK, ( N*N+1 ) / 2 ) ++ END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -15 + END IF +- IF( LWORK.LT.MINWRK ) THEN +- INFO = -15 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGEESX', -INFO ) + RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/zgeev.f LAPACK/SRC/zgeev.f +--- LAPACK.orig/SRC/zgeev.f Thu Nov 4 14:25:01 1999 ++++ LAPACK/SRC/zgeev.f Fri May 25 16:00:53 2001 +@@ -5,6 +5,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -85,10 +86,9 @@ + * The dimension of the array WORK. LWORK >= max(1,2*N). + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) + * +@@ -103,11 +103,13 @@ + * ===================================================================== + * + * .. Parameters .. 
++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, SCALEA, WANTVL, WANTVR ++ LOGICAL SCALEA, WANTVL, WANTVR + CHARACTER SIDE + INTEGER HSWORK, I, IBAL, IERR, IHI, ILO, IRWORK, ITAU, + $ IWRK, K, MAXB, MAXWRK, MINWRK, NOUT +@@ -119,8 +121,8 @@ + DOUBLE PRECISION DUM( 1 ) + * .. + * .. External Subroutines .. +- EXTERNAL XERBLA, ZDSCAL, ZGEBAK, ZGEBAL, ZGEHRD, ZHSEQR, +- $ ZLACPY, ZLASCL, ZSCAL, ZTREVC, ZUNGHR ++ EXTERNAL DLABAD, XERBLA, ZDSCAL, ZGEBAK, ZGEBAL, ZGEHRD, ++ $ ZHSEQR, ZLACPY, ZLASCL, ZSCAL, ZTREVC, ZUNGHR + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -136,7 +138,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVL = LSAME( JOBVL, 'V' ) + WANTVR = LSAME( JOBVR, 'V' ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.LSAME( JOBVL, 'N' ) ) ) THEN +@@ -165,7 +166,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'ZGEHRD', ' ', N, 1, N, 0 ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.WANTVR ) ) THEN + MINWRK = MAX( 1, 2*N ) +@@ -185,19 +186,18 @@ + MAXWRK = MAX( MAXWRK, HSWORK, 2*N ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -12 + END IF +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -12 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGEEV ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/zgeevx.f LAPACK/SRC/zgeevx.f +--- LAPACK.orig/SRC/zgeevx.f Thu Nov 4 14:25:01 1999 ++++ LAPACK/SRC/zgeevx.f Fri May 25 16:01:18 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER BALANC, JOBVL, JOBVR, SENSE +@@ -166,10 +167,9 @@ + * LWORK >= N*N+2*N. + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) + * +@@ -184,12 +184,14 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, SCALEA, WANTVL, WANTVR, WNTSNB, WNTSNE, +- $ WNTSNN, WNTSNV ++ LOGICAL SCALEA, WANTVL, WANTVR, WNTSNB, WNTSNE, WNTSNN, ++ $ WNTSNV + CHARACTER JOB, SIDE + INTEGER HSWORK, I, ICOND, IERR, ITAU, IWRK, K, MAXB, + $ MAXWRK, MINWRK, NOUT +@@ -201,9 +203,9 @@ + DOUBLE PRECISION DUM( 1 ) + * .. + * .. External Subroutines .. +- EXTERNAL DLASCL, XERBLA, ZDSCAL, ZGEBAK, ZGEBAL, ZGEHRD, +- $ ZHSEQR, ZLACPY, ZLASCL, ZSCAL, ZTREVC, ZTRSNA, +- $ ZUNGHR ++ EXTERNAL DLABAD, DLASCL, XERBLA, ZDSCAL, ZGEBAK, ZGEBAL, ++ $ ZGEHRD, ZHSEQR, ZLACPY, ZLASCL, ZSCAL, ZTREVC, ++ $ ZTRSNA, ZUNGHR + * .. + * .. External Functions .. 
+ LOGICAL LSAME +@@ -219,7 +221,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + WANTVL = LSAME( JOBVL, 'V' ) + WANTVR = LSAME( JOBVR, 'V' ) + WNTSNN = LSAME( SENSE, 'N' ) +@@ -260,7 +261,7 @@ + * the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'ZGEHRD', ' ', N, 1, N, 0 ) + IF( ( .NOT.WANTVL ) .AND. ( .NOT.WANTVR ) ) THEN + MINWRK = MAX( 1, 2*N ) +@@ -294,19 +295,18 @@ + MAXWRK = MAX( MAXWRK, 2*N, 1 ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -20 + END IF +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -20 +- END IF ++* ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGEEVX', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/zgegs.f LAPACK/SRC/zgegs.f +--- LAPACK.orig/SRC/zgegs.f Thu Nov 4 14:25:01 1999 ++++ LAPACK/SRC/zgegs.f Fri May 25 16:02:04 2001 +@@ -5,7 +5,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR +@@ -23,83 +23,71 @@ + * + * This routine is deprecated and has been replaced by routine ZGGES. + * +-* ZGEGS computes for a pair of N-by-N complex nonsymmetric matrices A, +-* B: the generalized eigenvalues (alpha, beta), the complex Schur +-* form (A, B), and optionally left and/or right Schur vectors +-* (VSL and VSR). +-* +-* (If only the generalized eigenvalues are needed, use the driver ZGEGV +-* instead.) +-* +-* A generalized eigenvalue for a pair of matrices (A,B) is, roughly +-* speaking, a scalar w or a ratio alpha/beta = w, such that A - w*B +-* is singular. 
It is usually represented as the pair (alpha,beta), +-* as there is a reasonable interpretation for beta=0, and even for +-* both being zero. A good beginning reference is the book, "Matrix +-* Computations", by G. Golub & C. van Loan (Johns Hopkins U. Press) +-* +-* The (generalized) Schur form of a pair of matrices is the result of +-* multiplying both matrices on the left by one unitary matrix and +-* both on the right by another unitary matrix, these two unitary +-* matrices being chosen so as to bring the pair of matrices into +-* upper triangular form with the diagonal elements of B being +-* non-negative real numbers (this is also called complex Schur form.) +-* +-* The left and right Schur vectors are the columns of VSL and VSR, +-* respectively, where VSL and VSR are the unitary matrices +-* which reduce A and B to Schur form: +-* +-* Schur form of (A,B) = ( (VSL)**H A (VSR), (VSL)**H B (VSR) ) ++* ZGEGS computes the eigenvalues, Schur form, and, optionally, the ++* left and or/right Schur vectors of a complex matrix pair (A,B). ++* Given two square matrices A and B, the generalized Schur ++* factorization has the form ++* ++* A = Q*S*Z**H, B = Q*T*Z**H ++* ++* where Q and Z are unitary matrices and S and T are upper triangular. ++* The columns of Q are the left Schur vectors ++* and the columns of Z are the right Schur vectors. ++* ++* If only the eigenvalues of (A,B) are needed, the driver routine ++* ZGEGV should be used instead. See ZGEGV for a description of the ++* eigenvalues of the generalized nonsymmetric eigenvalue problem ++* (GNEP). + * + * Arguments + * ========= + * + * JOBVSL (input) CHARACTER*1 + * = 'N': do not compute the left Schur vectors; +-* = 'V': compute the left Schur vectors. ++* = 'V': compute the left Schur vectors (returned in VSL). + * + * JOBVSR (input) CHARACTER*1 + * = 'N': do not compute the right Schur vectors; +-* = 'V': compute the right Schur vectors. ++* = 'V': compute the right Schur vectors (returned in VSR). 
+ * + * N (input) INTEGER + * The order of the matrices A, B, VSL, and VSR. N >= 0. + * + * A (input/output) COMPLEX*16 array, dimension (LDA, N) +-* On entry, the first of the pair of matrices whose generalized +-* eigenvalues and (optionally) Schur vectors are to be +-* computed. +-* On exit, the generalized Schur form of A. ++* On entry, the matrix A. ++* On exit, the upper triangular matrix S from the generalized ++* Schur factorization. + * + * LDA (input) INTEGER + * The leading dimension of A. LDA >= max(1,N). + * + * B (input/output) COMPLEX*16 array, dimension (LDB, N) +-* On entry, the second of the pair of matrices whose +-* generalized eigenvalues and (optionally) Schur vectors are +-* to be computed. +-* On exit, the generalized Schur form of B. ++* On entry, the matrix B. ++* On exit, the upper triangular matrix T from the generalized ++* Schur factorization. + * + * LDB (input) INTEGER + * The leading dimension of B. LDB >= max(1,N). + * + * ALPHA (output) COMPLEX*16 array, dimension (N) ++* The complex scalars alpha that define the eigenvalues of ++* GNEP. ALPHA(j) = S(j,j), the diagonal element of the Schur ++* form of A. ++* + * BETA (output) COMPLEX*16 array, dimension (N) +-* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the +-* generalized eigenvalues. ALPHA(j), j=1,...,N and BETA(j), +-* j=1,...,N are the diagonals of the complex Schur form (A,B) +-* output by ZGEGS. The BETA(j) will be non-negative real. +-* +-* Note: the quotients ALPHA(j)/BETA(j) may easily over- or +-* underflow, and BETA(j) may even be zero. Thus, the user +-* should avoid naively computing the ratio alpha/beta. +-* However, ALPHA will be always less than and usually +-* comparable with norm(A) in magnitude, and BETA always less +-* than and usually comparable with norm(B). ++* The non-negative real scalars beta that define the ++* eigenvalues of GNEP. BETA(j) = T(j,j), the diagonal element ++* of the triangular factor T. 
++* ++* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) ++* represent the j-th eigenvalue of the matrix pair (A,B), in ++* one of the forms lambda = alpha/beta or mu = beta/alpha. ++* Since either lambda or mu may overflow, they should not, ++* in general, be computed. ++* + * + * VSL (output) COMPLEX*16 array, dimension (LDVSL,N) +-* If JOBVSL = 'V', VSL will contain the left Schur vectors. +-* (See "Purpose", above.) ++* If JOBVSL = 'V', the matrix of left Schur vectors Q. + * Not referenced if JOBVSL = 'N'. + * + * LDVSL (input) INTEGER +@@ -107,8 +95,7 @@ + * if JOBVSL = 'V', LDVSL >= N. + * + * VSR (output) COMPLEX*16 array, dimension (LDVSR,N) +-* If JOBVSR = 'V', VSR will contain the right Schur vectors. +-* (See "Purpose", above.) ++* If JOBVSR = 'V', the matrix of right Schur vectors Z. + * Not referenced if JOBVSR = 'N'. + * + * LDVSR (input) INTEGER +diff -uNr LAPACK.orig/SRC/zgegv.f LAPACK/SRC/zgegv.f +--- LAPACK.orig/SRC/zgegv.f Thu Nov 4 14:25:45 1999 ++++ LAPACK/SRC/zgegv.f Fri May 25 16:02:27 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -22,22 +22,28 @@ + * + * This routine is deprecated and has been replaced by routine ZGGEV. + * +-* ZGEGV computes for a pair of N-by-N complex nonsymmetric matrices A +-* and B, the generalized eigenvalues (alpha, beta), and optionally, +-* the left and/or right generalized eigenvectors (VL and VR). +-* +-* A generalized eigenvalue for a pair of matrices (A,B) is, roughly +-* speaking, a scalar w or a ratio alpha/beta = w, such that A - w*B +-* is singular. It is usually represented as the pair (alpha,beta), +-* as there is a reasonable interpretation for beta=0, and even for +-* both being zero. 
A good beginning reference is the book, "Matrix +-* Computations", by G. Golub & C. van Loan (Johns Hopkins U. Press) +-* +-* A right generalized eigenvector corresponding to a generalized +-* eigenvalue w for a pair of matrices (A,B) is a vector r such +-* that (A - w B) r = 0 . A left generalized eigenvector is a vector +-* l such that l**H * (A - w B) = 0, where l**H is the +-* conjugate-transpose of l. ++* ZGEGV computes the eigenvalues and, optionally, the left and/or right ++* eigenvectors of a complex matrix pair (A,B). ++* Given two square matrices A and B, ++* the generalized nonsymmetric eigenvalue problem (GNEP) is to find the ++* eigenvalues lambda and corresponding (non-zero) eigenvectors x such ++* that ++* A*x = lambda*B*x. ++* ++* An alternate form is to find the eigenvalues mu and corresponding ++* eigenvectors y such that ++* mu*A*y = B*y. ++* ++* These two forms are equivalent with mu = 1/lambda and x = y if ++* neither lambda nor mu is zero. In order to deal with the case that ++* lambda or mu is zero or small, two values alpha and beta are returned ++* for each eigenvalue, such that lambda = alpha/beta and ++* mu = beta/alpha. ++* ++* The vectors x and y in the above equations are right eigenvectors of ++* the matrix pair (A,B). Vectors u and v satisfying ++* u**H*A = lambda*u**H*B or mu*v**H*A = v**H*B ++* are left eigenvectors of (A,B). + * + * Note: this routine performs "full balancing" on A and B -- see + * "Further Details", below. +@@ -47,56 +53,61 @@ + * + * JOBVL (input) CHARACTER*1 + * = 'N': do not compute the left generalized eigenvectors; +-* = 'V': compute the left generalized eigenvectors. ++* = 'V': compute the left generalized eigenvectors (returned ++* in VL). + * + * JOBVR (input) CHARACTER*1 + * = 'N': do not compute the right generalized eigenvectors; +-* = 'V': compute the right generalized eigenvectors. ++* = 'V': compute the right generalized eigenvectors (returned ++* in VR). 
+ * + * N (input) INTEGER + * The order of the matrices A, B, VL, and VR. N >= 0. + * + * A (input/output) COMPLEX*16 array, dimension (LDA, N) +-* On entry, the first of the pair of matrices whose +-* generalized eigenvalues and (optionally) generalized +-* eigenvectors are to be computed. +-* On exit, the contents will have been destroyed. (For a +-* description of the contents of A on exit, see "Further +-* Details", below.) ++* On entry, the matrix A. ++* If JOBVL = 'V' or JOBVR = 'V', then on exit A ++* contains the Schur form of A from the generalized Schur ++* factorization of the pair (A,B) after balancing. If no ++* eigenvectors were computed, then only the diagonal elements ++* of the Schur form will be correct. See ZGGHRD and ZHGEQZ ++* for details. + * + * LDA (input) INTEGER + * The leading dimension of A. LDA >= max(1,N). + * + * B (input/output) COMPLEX*16 array, dimension (LDB, N) +-* On entry, the second of the pair of matrices whose +-* generalized eigenvalues and (optionally) generalized +-* eigenvectors are to be computed. +-* On exit, the contents will have been destroyed. (For a +-* description of the contents of B on exit, see "Further +-* Details", below.) ++* On entry, the matrix B. ++* If JOBVL = 'V' or JOBVR = 'V', then on exit B contains the ++* upper triangular matrix obtained from B in the generalized ++* Schur factorization of the pair (A,B) after balancing. ++* If no eigenvectors were computed, then only the diagonal ++* elements of B will be correct. See ZGGHRD and ZHGEQZ for ++* details. + * + * LDB (input) INTEGER + * The leading dimension of B. LDB >= max(1,N). + * + * ALPHA (output) COMPLEX*16 array, dimension (N) +-* BETA (output) COMPLEX*16 array, dimension (N) +-* On exit, ALPHA(j)/BETA(j), j=1,...,N, will be the +-* generalized eigenvalues. ++* The complex scalars alpha that define the eigenvalues of ++* GNEP. + * +-* Note: the quotients ALPHA(j)/BETA(j) may easily over- or +-* underflow, and BETA(j) may even be zero. 
Thus, the user +-* should avoid naively computing the ratio alpha/beta. +-* However, ALPHA will be always less than and usually +-* comparable with norm(A) in magnitude, and BETA always less +-* than and usually comparable with norm(B). ++* BETA (output) COMPLEX*16 array, dimension (N) ++* The complex scalars beta that define the eigenvalues of GNEP. ++* ++* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) ++* represent the j-th eigenvalue of the matrix pair (A,B), in ++* one of the forms lambda = alpha/beta or mu = beta/alpha. ++* Since either lambda or mu may overflow, they should not, ++* in general, be computed. + * + * VL (output) COMPLEX*16 array, dimension (LDVL,N) +-* If JOBVL = 'V', the left generalized eigenvectors. (See +-* "Purpose", above.) +-* Each eigenvector will be scaled so the largest component +-* will have abs(real part) + abs(imag. part) = 1, *except* +-* that for eigenvalues with alpha=beta=0, a zero vector will +-* be returned as the corresponding eigenvector. ++* If JOBVL = 'V', the left eigenvectors u(j) are stored ++* in the columns of VL, in the same order as their eigenvalues. ++* Each eigenvector is scaled so that its largest component has ++* abs(real part) + abs(imag. part) = 1, except for eigenvectors ++* corresponding to an eigenvalue with alpha = beta = 0, which ++* are set to zero. + * Not referenced if JOBVL = 'N'. + * + * LDVL (input) INTEGER +@@ -104,12 +115,12 @@ + * if JOBVL = 'V', LDVL >= N. + * + * VR (output) COMPLEX*16 array, dimension (LDVR,N) +-* If JOBVR = 'V', the right generalized eigenvectors. (See +-* "Purpose", above.) +-* Each eigenvector will be scaled so the largest component +-* will have abs(real part) + abs(imag. part) = 1, *except* +-* that for eigenvalues with alpha=beta=0, a zero vector will +-* be returned as the corresponding eigenvector. ++* If JOBVR = 'V', the right eigenvectors x(j) are stored ++* in the columns of VR, in the same order as their eigenvalues. 
++* Each eigenvector is scaled so that its largest component has ++* abs(real part) + abs(imag. part) = 1, except for eigenvectors ++* corresponding to an eigenvalue with alpha = beta = 0, which ++* are set to zero. + * Not referenced if JOBVR = 'N'. + * + * LDVR (input) INTEGER +@@ -123,8 +134,8 @@ + * The dimension of the array WORK. LWORK >= max(1,2*N). + * For good performance, LWORK must generally be larger. + * To compute the optimal value of LWORK, call ILAENV to get +-* blocksizes (for ZGEQRF, ZUNMQR, and CUNGQR.) Then compute: +-* NB -- MAX of the blocksizes for ZGEQRF, ZUNMQR, and CUNGQR; ++* blocksizes (for ZGEQRF, ZUNMQR, and ZUNGQR.) Then compute: ++* NB -- MAX of the blocksizes for ZGEQRF, ZUNMQR, and ZUNGQR; + * The optimal LWORK is MAX( 2*N, N*(NB+1) ). + * + * If LWORK = -1, then a workspace query is assumed; the routine +diff -uNr LAPACK.orig/SRC/zgelsd.f LAPACK/SRC/zgelsd.f +--- LAPACK.orig/SRC/zgelsd.f Thu Nov 4 14:26:26 1999 ++++ LAPACK/SRC/zgelsd.f Fri May 25 16:03:34 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -62,9 +63,10 @@ + * The number of right hand sides, i.e., the number of columns + * of the matrices B and X. NRHS >= 0. + * +-* A (input) COMPLEX*16 array, dimension (LDA,N) ++* A (input/output) COMPLEX*16 array, dimension (LDA,N) + * On entry, the M-by-N matrix A. +-* On exit, A has been destroyed. ++* On exit, the first min(m,n) rows of A are overwritten with ++* its right singular vectors, stored rowwise. + * + * LDA (input) INTEGER + * The leading dimension of the array A. LDA >= max(1,M). +@@ -96,31 +98,24 @@ + * On exit, if INFO = 0, WORK(1) returns the optimal LWORK. 
+ * + * LWORK (input) INTEGER +-* The dimension of the array WORK. LWORK must be at least 1. ++* The dimension of the array WORK. LWORK >= 1. + * The exact minimum amount of workspace needed depends on M, +-* N and NRHS. As long as LWORK is at least +-* 2 * N + N * NRHS +-* if M is greater than or equal to N or +-* 2 * M + M * NRHS +-* if M is less than N, the code will execute correctly. ++* N and NRHS. ++* If M >= N, LWORK >= 2*N + N*NRHS. ++* If M < N, LWORK >= 2*M + M*NRHS. + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. +-* +-* RWORK (workspace) DOUBLE PRECISION array, dimension at least +-* 10*N + 2*N*SMLSIZ + 8*N*NLVL + 3*SMLSIZ*NRHS + +-* (SMLSIZ+1)**2 +-* if M is greater than or equal to N or +-* 10*M + 2*M*SMLSIZ + 8*M*NLVL + 3*SMLSIZ*NRHS + +-* (SMLSIZ+1)**2 +-* if M is less than N, the code will execute correctly. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. ++* ++* RWORK (workspace) DOUBLE PRECISION array, dimension (LRWORK) ++* If M >= N, LRWORK >= 8*N + 2*N*SMLSIZ + 8*N*NLVL + N*NRHS. ++* If M < N, LRWORK >= 8*M + 2*M*SMLSIZ + 8*M*NLVL + M*NRHS. + * SMLSIZ is returned by ILAENV and is equal to the maximum + * size of the subproblems at the bottom of the computation + * tree (usually about 25), and +-* NLVL = MAX( 0, INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 ) ++* NLVL = INT( LOG_2( MIN( M,N )/(SMLSIZ+1) ) ) + 1 + * + * IWORK (workspace) INTEGER array, dimension (LIWORK) + * LIWORK >= 3 * MINMN * NLVL + 11 * MINMN, +@@ -144,13 +139,14 @@ + * ===================================================================== + * + * .. Parameters .. 
++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) + COMPLEX*16 CZERO + PARAMETER ( CZERO = ( 0.0D+0, 0.0D+0 ) ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY + INTEGER IASCL, IBSCL, IE, IL, ITAU, ITAUP, ITAUQ, + $ LDWORK, MAXMN, MAXWRK, MINMN, MINWRK, MM, + $ MNTHR, NRWORK, NWORK, SMLSIZ +@@ -177,7 +173,6 @@ + MINMN = MIN( M, N ) + MAXMN = MAX( M, N ) + MNTHR = ILAENV( 6, 'ZGELSD', ' ', M, N, NRHS, -1 ) +- LQUERY = ( LWORK.EQ.-1 ) + IF( M.LT.0 ) THEN + INFO = -1 + ELSE IF( N.LT.0 ) THEN +@@ -261,20 +256,18 @@ + END IF + MINWRK = MIN( MINWRK, MAXWRK ) + WORK( 1 ) = DCMPLX( MAXWRK, 0 ) +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -12 +- END IF ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -12 + END IF + * ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGELSD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- GO TO 10 + END IF +-* +-* Quick return if possible. +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( M.EQ.0 .OR. N.EQ.0 ) THEN + RANK = 0 + RETURN +diff -uNr LAPACK.orig/SRC/zgelss.f LAPACK/SRC/zgelss.f +--- LAPACK.orig/SRC/zgelss.f Thu Nov 4 14:25:02 1999 ++++ LAPACK/SRC/zgelss.f Fri May 25 16:04:00 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -87,10 +87,9 @@ + * LWORK >= 2*min(M,N) + max(M,N,NRHS) + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. 
The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) DOUBLE PRECISION array, dimension (5*min(M,N)) + * +@@ -164,7 +163,7 @@ + * immediately following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 0 + MM = M + IF( M.GE.N .AND. M.GE.MNTHR ) THEN +@@ -232,22 +231,20 @@ + MAXWRK = MAX( MAXWRK, N*NRHS ) + END IF + END IF +- MINWRK = MAX( MINWRK, 1 ) + MAXWRK = MAX( MINWRK, MAXWRK ) + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) ++ $ INFO = -12 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) +- $ INFO = -12 ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGELSS', -INFO ) + RETURN + ELSE IF( LQUERY ) THEN + RETURN + END IF +-* +-* Quick return if possible +-* + IF( M.EQ.0 .OR. N.EQ.0 ) THEN + RANK = 0 + RETURN +@@ -512,8 +509,8 @@ + DO 40 I = 1, NRHS, CHUNK + BL = MIN( NRHS-I+1, CHUNK ) + CALL ZGEMM( 'C', 'N', M, BL, M, CONE, WORK( IL ), LDWORK, +- $ B( 1, I ), LDB, CZERO, WORK( IWORK ), N ) +- CALL ZLACPY( 'G', M, BL, WORK( IWORK ), N, B( 1, I ), ++ $ B( 1, I ), LDB, CZERO, WORK( IWORK ), M ) ++ CALL ZLACPY( 'G', M, BL, WORK( IWORK ), M, B( 1, I ), + $ LDB ) + 40 CONTINUE + ELSE +diff -uNr LAPACK.orig/SRC/zgesdd.f LAPACK/SRC/zgesdd.f +--- LAPACK.orig/SRC/zgesdd.f Thu Nov 11 20:33:19 1999 ++++ LAPACK/SRC/zgesdd.f Fri May 25 16:08:08 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBZ +@@ -119,12 +120,14 @@ + * if JOBZ = 'S' or 'A', + * LWORK >= min(M,N)*min(M,N)+2*min(M,N)+max(M,N). + * For good performance, LWORK should generally be larger. 
+-* If LWORK < 0 but other input arguments are legal, WORK(1) +-* returns the optimal LWORK. ++* ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) DOUBLE PRECISION array, dimension (LRWORK) +-* If JOBZ = 'N', LRWORK >= 7*min(M,N). +-* Otherwise, LRWORK >= 5*min(M,N)*min(M,N) + 5*min(M,N) ++* If JOBZ = 'N', LRWORK >= 5*min(M,N). ++* Otherwise, LRWORK >= 5*min(M,N)*min(M,N) + 7*min(M,N) + * + * IWORK (workspace) INTEGER array, dimension (8*min(M,N)) + * +@@ -143,14 +146,16 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + COMPLEX*16 CZERO, CONE +- PARAMETER ( CZERO = ( 0.0D0, 0.0D0 ), +- $ CONE = ( 1.0D0, 0.0D0 ) ) ++ PARAMETER ( CZERO = ( 0.0D+0, 0.0D+0 ), ++ $ CONE = ( 1.0D+0, 0.0D+0 ) ) + DOUBLE PRECISION ZERO, ONE +- PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) ++ PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, WNTQA, WNTQAS, WNTQN, WNTQO, WNTQS ++ LOGICAL WNTQA, WNTQAS, WNTQN, WNTQO, WNTQS + INTEGER BLK, CHUNK, I, IE, IERR, IL, IR, IRU, IRVT, + $ ISCL, ITAU, ITAUP, ITAUQ, IU, IVT, LDWKVT, + $ LDWRKL, LDWRKR, LDWRKU, MAXWRK, MINMN, MINWRK, +@@ -162,15 +167,15 @@ + DOUBLE PRECISION DUM( 1 ) + * .. + * .. External Subroutines .. +- EXTERNAL DBDSDC, DLASCL, XERBLA, ZGEBRD, ZGELQF, ZGEMM, +- $ ZGEQRF, ZLACP2, ZLACPY, ZLACRM, ZLARCM, ZLASCL, ++ EXTERNAL DBDSDC, DLASCL, XERBLA, ZGEBRD, ZGELQF, ZGEMM, ++ $ ZGEQRF, ZLACP2, ZLACPY, ZLACRM, ZLARCM, ZLASCL, + $ ZLASET, ZUNGBR, ZUNGLQ, ZUNGQR, ZUNMBR + * .. + * .. External Functions .. + LOGICAL LSAME + INTEGER ILAENV + DOUBLE PRECISION DLAMCH, ZLANGE +- EXTERNAL DLAMCH, ILAENV, LSAME, ZLANGE ++ EXTERNAL LSAME, ILAENV, DLAMCH, ZLANGE + * .. + * .. Intrinsic Functions .. 
+ INTRINSIC INT, MAX, MIN, SQRT +@@ -190,7 +195,6 @@ + WNTQN = LSAME( JOBZ, 'N' ) + MINWRK = 1 + MAXWRK = 1 +- LQUERY = ( LWORK.EQ.-1 ) + * + IF( .NOT.( WNTQA .OR. WNTQS .OR. WNTQO .OR. WNTQN ) ) THEN + INFO = -1 +@@ -221,19 +225,21 @@ + IF( M.GE.N ) THEN + * + * There is no complex work space needed for bidiagonal SVD +-* The real work space needed for bidiagonal SVD is BDSPAC, +-* BDSPAC = 3*N*N + 4*N ++* The real work space needed for bidiagonal SVD is BDSPAC ++* for computing singular values and singular vectors; BDSPAN ++* for computing singular values only. ++* BDSPAC = 5*N*N + 7*N ++* BDSPAN = MAX(7*N+4, 3*N+2+SMLSIZ*(SMLSIZ+8)) + * + IF( M.GE.MNTHR1 ) THEN + IF( WNTQN ) THEN + * + * Path 1 (M much larger than N, JOBZ='N') + * +- WRKBL = N + N*ILAENV( 1, 'ZGEQRF', ' ', M, N, -1, +- $ -1 ) +- WRKBL = MAX( WRKBL, 2*N+2*N* +- $ ILAENV( 1, 'ZGEBRD', ' ', N, N, -1, -1 ) ) +- MAXWRK = WRKBL ++ MAXWRK = N + N*ILAENV( 1, 'ZGEQRF', ' ', M, N, -1, ++ $ -1 ) ++ MAXWRK = MAX( MAXWRK, 2*N+2*N* ++ $ ILAENV( 1, 'ZGEBRD', ' ', N, N, -1, -1 ) ) + MINWRK = 3*N + ELSE IF( WNTQO ) THEN + * +@@ -335,8 +341,11 @@ + ELSE + * + * There is no complex work space needed for bidiagonal SVD +-* The real work space needed for bidiagonal SVD is BDSPAC, +-* BDSPAC = 3*M*M + 4*M ++* The real work space needed for bidiagonal SVD is BDSPAC ++* for computing singular values and singular vectors; BDSPAN ++* for computing singular values only. ++* BDSPAC = 5*M*M + 7*M ++* BDSPAN = MAX(7*M+4, 3*M+2+SMLSIZ*(SMLSIZ+8)) + * + IF( N.GE.MNTHR1 ) THEN + IF( WNTQN ) THEN +@@ -447,24 +456,22 @@ + END IF + END IF + MAXWRK = MAX( MAXWRK, MINWRK ) ++ END IF ++ IF( INFO.EQ.0 ) THEN + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -13 + END IF + * +- IF( LWORK.LT.MINWRK .AND. 
.NOT.LQUERY ) THEN +- INFO = -13 +- END IF ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGESDD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( M.EQ.0 .OR. N.EQ.0 ) THEN +- IF( LWORK.GE.1 ) +- $ WORK( 1 ) = ONE + RETURN + END IF + * +@@ -529,7 +536,7 @@ + * + * Perform bidiagonal SVD, compute singular values only + * (CWorkspace: 0) +-* (RWorkspace: need BDSPAC) ++* (RWorkspace: need BDSPAN) + * + CALL DBDSDC( 'U', 'N', N, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( NRWORK ), IWORK, INFO ) +@@ -844,7 +851,7 @@ + * + * Compute singular values only + * (Cworkspace: 0) +-* (Rworkspace: need BDSPAC) ++* (Rworkspace: need BDSPAN) + * + CALL DBDSDC( 'U', 'N', N, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( NRWORK ), IWORK, INFO ) +@@ -1040,7 +1047,7 @@ + * + * Compute singular values only + * (Cworkspace: 0) +-* (Rworkspace: need BDSPAC) ++* (Rworkspace: need BDSPAN) + * + CALL DBDSDC( 'U', 'N', N, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( NRWORK ), IWORK, INFO ) +@@ -1205,8 +1212,8 @@ + ELSE + * + * A has more columns than rows. 
If A has sufficiently more +-* columns than rows, first reduce using the LQ decomposition +-* (if sufficient workspace available) ++* columns than rows, first reduce using the LQ decomposition (if ++* sufficient workspace available) + * + IF( N.GE.MNTHR1 ) THEN + * +@@ -1245,7 +1252,7 @@ + * + * Perform bidiagonal SVD, compute singular values only + * (CWorkspace: 0) +-* (RWorkspace: need BDSPAC) ++* (RWorkspace: need BDSPAN) + * + CALL DBDSDC( 'U', 'N', M, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( NRWORK ), IWORK, INFO ) +@@ -1567,7 +1574,7 @@ + * + * Compute singular values only + * (Cworkspace: 0) +-* (Rworkspace: need BDSPAC) ++* (Rworkspace: need BDSPAN) + * + CALL DBDSDC( 'L', 'N', M, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( NRWORK ), IWORK, INFO ) +@@ -1763,7 +1770,7 @@ + * + * Compute singular values only + * (Cworkspace: 0) +-* (Rworkspace: need BDSPAC) ++* (Rworkspace: need BDSPAN) + * + CALL DBDSDC( 'L', 'N', M, S, RWORK( IE ), DUM, 1, DUM, 1, + $ DUM, IDUM, RWORK( NRWORK ), IWORK, INFO ) +@@ -1934,9 +1941,15 @@ + IF( ANRM.GT.BIGNUM ) + $ CALL DLASCL( 'G', 0, 0, BIGNUM, ANRM, MINMN, 1, S, MINMN, + $ IERR ) ++ IF( INFO.NE.0 .AND. ANRM.GT.BIGNUM ) ++ $ CALL DLASCL( 'G', 0, 0, BIGNUM, ANRM, MINMN-1, 1, ++ $ RWORK( IE ), MINMN, IERR ) + IF( ANRM.LT.SMLNUM ) + $ CALL DLASCL( 'G', 0, 0, SMLNUM, ANRM, MINMN, 1, S, MINMN, + $ IERR ) ++ IF( INFO.NE.0 .AND. ANRM.LT.SMLNUM ) ++ $ CALL DLASCL( 'G', 0, 0, SMLNUM, ANRM, MINMN-1, 1, ++ $ RWORK( IE ), MINMN, IERR ) + END IF + * + * Return optimal workspace in WORK(1) +diff -uNr LAPACK.orig/SRC/zgesvd.f LAPACK/SRC/zgesvd.f +--- LAPACK.orig/SRC/zgesvd.f Thu Nov 4 14:25:03 1999 ++++ LAPACK/SRC/zgesvd.f Fri May 25 16:08:34 2001 +@@ -4,7 +4,8 @@ + * -- LAPACK driver routine (version 3.0) -- + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBU, JOBVT +@@ -114,12 +115,12 @@ + * LWORK >= 2*MIN(M,N)+MAX(M,N). + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * +-* RWORK (workspace) DOUBLE PRECISION array, dimension (5*min(M,N)) ++* RWORK (workspace) DOUBLE PRECISION array, dimension ++* (5*min(M,N)) + * On exit, if INFO > 0, RWORK(1:MIN(M,N)-1) contains the + * unconverged superdiagonal elements of an upper bidiagonal + * matrix B whose diagonal is in S (not necessarily sorted). +@@ -137,6 +138,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + COMPLEX*16 CZERO, CONE + PARAMETER ( CZERO = ( 0.0D0, 0.0D0 ), + $ CONE = ( 1.0D0, 0.0D0 ) ) +@@ -144,8 +147,8 @@ + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + * .. + * .. Local Scalars .. +- LOGICAL LQUERY, WNTUA, WNTUAS, WNTUN, WNTUO, WNTUS, +- $ WNTVA, WNTVAS, WNTVN, WNTVO, WNTVS ++ LOGICAL WNTUA, WNTUAS, WNTUN, WNTUO, WNTUS, WNTVA, ++ $ WNTVAS, WNTVN, WNTVO, WNTVS + INTEGER BLK, CHUNK, I, IE, IERR, IR, IRWORK, ISCL, + $ ITAU, ITAUP, ITAUQ, IU, IWORK, LDWRKR, LDWRKU, + $ MAXWRK, MINMN, MINWRK, MNTHR, NCU, NCVT, NRU, +@@ -188,7 +191,7 @@ + WNTVO = LSAME( JOBVT, 'O' ) + WNTVN = LSAME( JOBVT, 'N' ) + MINWRK = 1 +- LQUERY = ( LWORK.EQ.-1 ) ++ MAXWRK = 1 + * + IF( .NOT.( WNTUA .OR. 
WNTUS .OR. WNTUO .OR. WNTUN ) ) THEN + INFO = -1 +@@ -216,8 +219,7 @@ + * real workspace. NB refers to the optimal block size for the + * immediately following subroutine, as returned by ILAENV.) + * +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) .AND. M.GT.0 .AND. +- $ N.GT.0 ) THEN ++ IF( INFO.EQ.0 .AND. M.GT.0 .AND. N.GT.0 ) THEN + IF( M.GE.N ) THEN + * + * Space needed for ZBDSQR is BDSPAC = 5*N +@@ -543,24 +545,22 @@ + MAXWRK = MAX( MINWRK, MAXWRK ) + END IF + END IF ++ END IF ++ IF( INFO.EQ.0 ) THEN + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -13 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -13 +- END IF ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGESVD', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( M.EQ.0 .OR. N.EQ.0 ) THEN +- IF( LWORK.GE.1 ) +- $ WORK( 1 ) = ONE + RETURN + END IF + * +diff -uNr LAPACK.orig/SRC/zggbak.f LAPACK/SRC/zggbak.f +--- LAPACK.orig/SRC/zggbak.f Thu Nov 4 14:25:03 1999 ++++ LAPACK/SRC/zggbak.f Fri May 25 16:09:06 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* February 1, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOB, SIDE +@@ -109,10 +109,15 @@ + INFO = -3 + ELSE IF( ILO.LT.1 ) THEN + INFO = -4 +- ELSE IF( IHI.LT.ILO .OR. IHI.GT.MAX( 1, N ) ) THEN ++ ELSE IF( N.EQ.0 .AND. IHI.EQ.0 .AND. ILO.NE.1 ) THEN ++ INFO = -4 ++ ELSE IF( N.GT.0 .AND. ( IHI.LT.ILO .OR. IHI.GT.MAX( 1, N ) ) ) ++ $ THEN ++ INFO = -5 ++ ELSE IF( N.EQ.0 .AND. ILO.EQ.1 .AND. 
IHI.NE.0 ) THEN + INFO = -5 + ELSE IF( M.LT.0 ) THEN +- INFO = -6 ++ INFO = -8 + ELSE IF( LDV.LT.MAX( 1, N ) ) THEN + INFO = -10 + END IF +diff -uNr LAPACK.orig/SRC/zggbal.f LAPACK/SRC/zggbal.f +--- LAPACK.orig/SRC/zggbal.f Thu Nov 4 14:25:45 1999 ++++ LAPACK/SRC/zggbal.f Fri May 25 16:09:27 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 12, 2001 + * + * .. Scalar Arguments .. + CHARACTER JOB +@@ -150,7 +150,7 @@ + ELSE IF( LDA.LT.MAX( 1, N ) ) THEN + INFO = -4 + ELSE IF( LDB.LT.MAX( 1, N ) ) THEN +- INFO = -5 ++ INFO = -6 + END IF + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGGBAL', -INFO ) +@@ -197,8 +197,8 @@ + IF( L.NE.1 ) + $ GO TO 30 + * +- RSCALE( 1 ) = 1 +- LSCALE( 1 ) = 1 ++ RSCALE( 1 ) = ONE ++ LSCALE( 1 ) = ONE + GO TO 190 + * + 30 CONTINUE +@@ -256,7 +256,7 @@ + * Permute rows M and I + * + 160 CONTINUE +- LSCALE( M ) = I ++ LSCALE( M ) = DBLE( I ) + IF( I.EQ.M ) + $ GO TO 170 + CALL ZSWAP( N-K+1, A( I, K ), LDA, A( M, K ), LDA ) +@@ -265,7 +265,7 @@ + * Permute columns M and J + * + 170 CONTINUE +- RSCALE( M ) = J ++ RSCALE( M ) = DBLE( J ) + IF( J.EQ.M ) + $ GO TO 180 + CALL ZSWAP( L, A( 1, J ), 1, A( 1, M ), 1 ) +@@ -437,7 +437,7 @@ + DO 360 I = ILO, IHI + IRAB = IZAMAX( N-ILO+1, A( I, ILO ), LDA ) + RAB = ABS( A( I, IRAB+ILO-1 ) ) +- IRAB = IZAMAX( N-ILO+1, B( I, ILO ), LDA ) ++ IRAB = IZAMAX( N-ILO+1, B( I, ILO ), LDB ) + RAB = MAX( RAB, ABS( B( I, IRAB+ILO-1 ) ) ) + LRAB = INT( LOG10( RAB+SFMIN ) / BASL+ONE ) + IR = LSCALE( I ) + SIGN( HALF, LSCALE( I ) ) +diff -uNr LAPACK.orig/SRC/zgges.f LAPACK/SRC/zgges.f +--- LAPACK.orig/SRC/zgges.f Thu Nov 4 14:26:21 1999 ++++ LAPACK/SRC/zgges.f Fri May 25 16:09:47 2001 +@@ -6,6 +6,7 @@ + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR, SORT +@@ -145,10 +146,9 @@ + * The dimension of the array WORK. LWORK >= max(1,2*N). + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) DOUBLE PRECISION array, dimension (8*N) + * +@@ -173,6 +173,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + COMPLEX*16 CZERO, CONE +@@ -181,7 +183,7 @@ + * .. + * .. Local Scalars .. + LOGICAL CURSL, ILASCL, ILBSCL, ILVSL, ILVSR, LASTSL, +- $ LQUERY, WANTST ++ $ WANTST + INTEGER I, ICOLS, IERR, IHI, IJOBVL, IJOBVR, ILEFT, + $ ILO, IRIGHT, IROWS, IRWRK, ITAU, IWRK, LWKMIN, + $ LWKOPT +@@ -193,8 +195,9 @@ + DOUBLE PRECISION DIF( 2 ) + * .. + * .. External Subroutines .. +- EXTERNAL XERBLA, ZGEQRF, ZGGBAK, ZGGBAL, ZGGHRD, ZHGEQZ, +- $ ZLACPY, ZLASCL, ZLASET, ZTGSEN, ZUNGQR, ZUNMQR ++ EXTERNAL DLABAD, XERBLA, ZGEQRF, ZGGBAK, ZGGBAL, ZGGHRD, ++ $ ZHGEQZ, ZLACPY, ZLASCL, ZLASET, ZTGSEN, ZUNGQR, ++ $ ZUNMQR + * .. + * .. External Functions .. 
+ LOGICAL LSAME +@@ -236,7 +239,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( IJOBVL.LE.0 ) THEN + INFO = -1 + ELSE IF( IJOBVR.LE.0 ) THEN +@@ -263,7 +265,7 @@ + * following subroutine, as returned by ILAENV.) + * + LWKMIN = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + LWKMIN = MAX( 1, 2*N ) + LWKOPT = N + N*ILAENV( 1, 'ZGEQRF', ' ', N, 1, N, 0 ) + IF( ILVSL ) THEN +@@ -271,21 +273,18 @@ + $ -1 ) ) + END IF + WORK( 1 ) = LWKOPT ++ IF( LWORK.LT.LWKMIN .AND. LWORK.NE.LQUERV ) ++ $ INFO = -18 + END IF + * +- IF( LWORK.LT.LWKMIN .AND. .NOT.LQUERY ) +- $ INFO = -18 ++* Quick return if possible + * + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGGES ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* +- WORK( 1 ) = LWKOPT ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/zggesx.f LAPACK/SRC/zggesx.f +--- LAPACK.orig/SRC/zggesx.f Thu Nov 4 14:26:21 1999 ++++ LAPACK/SRC/zggesx.f Fri May 25 16:10:05 2001 +@@ -7,6 +7,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Do WS calculations if LWORK = -1 (eca) + * + * .. Scalar Arguments .. + CHARACTER JOBVSL, JOBVSR, SENSE, SORT +@@ -167,6 +168,10 @@ + * If SENSE = 'E', 'V', or 'B', + * LWORK >= MAX(2*N, 2*SDIM*(N-SDIM)). + * ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. ++* + * RWORK (workspace) DOUBLE PRECISION array, dimension ( 8*N ) + * Real workspace. + * +@@ -198,6 +203,8 @@ + * ===================================================================== + * + * .. Parameters .. 
++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) + COMPLEX*16 CZERO, CONE +@@ -217,8 +224,9 @@ + DOUBLE PRECISION DIF( 2 ) + * .. + * .. External Subroutines .. +- EXTERNAL XERBLA, ZGEQRF, ZGGBAK, ZGGBAL, ZGGHRD, ZHGEQZ, +- $ ZLACPY, ZLASCL, ZLASET, ZTGSEN, ZUNGQR, ZUNMQR ++ EXTERNAL DLABAD, XERBLA, ZGEQRF, ZGGBAK, ZGGBAL, ZGGHRD, ++ $ ZHGEQZ, ZLACPY, ZLASCL, ZLASET, ZTGSEN, ZUNGQR, ++ $ ZUNMQR + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -303,14 +311,22 @@ + * following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. LWORK.GE.1 ) THEN ++ IF( INFO.EQ.0 ) THEN + MINWRK = MAX( 1, 2*N ) + MAXWRK = N + N*ILAENV( 1, 'ZGEQRF', ' ', N, 1, N, 0 ) + IF( ILVSL ) THEN + MAXWRK = MAX( MAXWRK, N+N*ILAENV( 1, 'ZUNGQR', ' ', N, 1, N, + $ -1 ) ) + END IF ++* ++* Estimate the workspace needed by ZTGSEN. ++* ++ IF( WANTST ) THEN ++ MAXWRK = MAX( MAXWRK, ( N*N+1 ) / 2 ) ++ END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -21 + END IF + IF( .NOT.WANTSN ) THEN + LIWMIN = N + 2 +@@ -318,21 +334,19 @@ + LIWMIN = 1 + END IF + IWORK( 1 ) = LIWMIN +-* +- IF( INFO.EQ.0 .AND. LWORK.LT.MINWRK ) THEN +- INFO = -21 +- ELSE IF( INFO.EQ.0 .AND. IJOB.GE.1 ) THEN ++ IF( INFO.EQ.0 .AND. IJOB.GE.1 ) THEN + IF( LIWORK.LT.LIWMIN ) + $ INFO = -24 + END IF + * ++* Quick returns ++* + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGGESX', -INFO ) + RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) THEN + SDIM = 0 + RETURN +diff -uNr LAPACK.orig/SRC/zggev.f LAPACK/SRC/zggev.f +--- LAPACK.orig/SRC/zggev.f Thu Nov 4 14:26:21 1999 ++++ LAPACK/SRC/zggev.f Fri May 25 16:10:25 2001 +@@ -5,6 +5,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. 
Scalar Arguments .. + CHARACTER JOBVL, JOBVR +@@ -113,10 +114,9 @@ + * The dimension of the array WORK. LWORK >= max(1,2*N). + * For good performance, LWORK must generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace/output) DOUBLE PRECISION array, dimension (8*N) + * +@@ -133,6 +133,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D0, ONE = 1.0D0 ) + COMPLEX*16 CZERO, CONE +@@ -140,7 +142,7 @@ + $ CONE = ( 1.0D0, 0.0D0 ) ) + * .. + * .. Local Scalars .. +- LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, LQUERY ++ LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR + CHARACTER CHTEMP + INTEGER ICOLS, IERR, IHI, IJOBVL, IJOBVR, ILEFT, ILO, + $ IN, IRIGHT, IROWS, IRWRK, ITAU, IWRK, JC, JR, +@@ -153,8 +155,9 @@ + LOGICAL LDUMMA( 1 ) + * .. + * .. External Subroutines .. +- EXTERNAL XERBLA, ZGEQRF, ZGGBAK, ZGGBAL, ZGGHRD, ZHGEQZ, +- $ ZLACPY, ZLASCL, ZLASET, ZTGEVC, ZUNGQR, ZUNMQR ++ EXTERNAL DLABAD, XERBLA, ZGEQRF, ZGGBAK, ZGGBAL, ZGGHRD, ++ $ ZHGEQZ, ZLACPY, ZLASCL, ZLASET, ZTGEVC, ZUNGQR, ++ $ ZUNMQR + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -201,7 +204,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( IJOBVL.LE.0 ) THEN + INFO = -1 + ELSE IF( IJOBVR.LE.0 ) THEN +@@ -227,25 +229,22 @@ + * computed assuming ILO = 1 and IHI = N, the worst case.) + * + LWKMIN = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. 
LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + LWKOPT = N + N*ILAENV( 1, 'ZGEQRF', ' ', N, 1, N, 0 ) + LWKMIN = MAX( 1, 2*N ) + WORK( 1 ) = LWKOPT ++ IF( LWORK.LT.LWKMIN .AND. LWORK.NE.LQUERV ) ++ $ INFO = -15 + END IF + * +- IF( LWORK.LT.LWKMIN .AND. .NOT.LQUERY ) +- $ INFO = -15 ++* Quick returns + * + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGGEV ', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* +- WORK( 1 ) = LWKOPT ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/zggevx.f LAPACK/SRC/zggevx.f +--- LAPACK.orig/SRC/zggevx.f Thu Nov 4 14:26:21 1999 ++++ LAPACK/SRC/zggevx.f Fri May 25 16:11:40 2001 +@@ -7,6 +7,7 @@ + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University + * June 30, 1999 ++* 8-15-00: Improve consistency of WS calculations (eca) + * + * .. Scalar Arguments .. + CHARACTER BALANC, JOBVL, JOBVR, SENSE +@@ -194,10 +195,9 @@ + * If SENSE = 'N' or 'E', LWORK >= 2*N. + * If SENSE = 'V' or 'B', LWORK >= 2*N*N+2*N. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) DOUBLE PRECISION array, dimension (6*N) + * Real workspace. +@@ -247,6 +247,8 @@ + * ===================================================================== + * + * .. Parameters .. ++ INTEGER LQUERV ++ PARAMETER ( LQUERV = -1 ) + DOUBLE PRECISION ZERO, ONE + PARAMETER ( ZERO = 0.0D+0, ONE = 1.0D+0 ) + COMPLEX*16 CZERO, CONE +@@ -254,8 +256,8 @@ + $ CONE = ( 1.0D+0, 0.0D+0 ) ) + * .. + * .. Local Scalars .. 
+- LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, LQUERY, +- $ WANTSB, WANTSE, WANTSN, WANTSV ++ LOGICAL ILASCL, ILBSCL, ILV, ILVL, ILVR, WANTSB, ++ $ WANTSE, WANTSN, WANTSV + CHARACTER CHTEMP + INTEGER I, ICOLS, IERR, IJOBVL, IJOBVR, IN, IROWS, + $ ITAU, IWRK, IWRK1, J, JC, JR, M, MAXWRK, MINWRK +@@ -267,9 +269,9 @@ + LOGICAL LDUMMA( 1 ) + * .. + * .. External Subroutines .. +- EXTERNAL DLASCL, XERBLA, ZGEQRF, ZGGBAK, ZGGBAL, ZGGHRD, +- $ ZHGEQZ, ZLACPY, ZLASCL, ZLASET, ZTGEVC, ZTGSNA, +- $ ZUNGQR, ZUNMQR ++ EXTERNAL DLABAD, DLASCL, XERBLA, ZGEQRF, ZGGBAK, ZGGBAL, ++ $ ZGGHRD, ZHGEQZ, ZLACPY, ZLASCL, ZLASET, ZTGEVC, ++ $ ZTGSNA, ZUNGQR, ZUNMQR + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -321,7 +323,6 @@ + * Test the input arguments + * + INFO = 0 +- LQUERY = ( LWORK.EQ.-1 ) + IF( .NOT.( LSAME( BALANC, 'N' ) .OR. LSAME( BALANC, + $ 'S' ) .OR. LSAME( BALANC, 'P' ) .OR. LSAME( BALANC, 'B' ) ) ) + $ THEN +@@ -354,7 +355,7 @@ + * computed assuming ILO = 1 and IHI = N, the worst case.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = N + N*ILAENV( 1, 'ZGEQRF', ' ', N, 1, N, 0 ) + IF( WANTSE ) THEN + MINWRK = MAX( 1, 2*N ) +@@ -363,21 +364,18 @@ + MAXWRK = MAX( MAXWRK, 2*N*N+2*N ) + END IF + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. LWORK.NE.LQUERV ) ++ $ INFO = -25 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN +- INFO = -25 +- END IF ++* Quick returns + * + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGGEVX', -INFO ) + RETURN +- ELSE IF( LQUERY ) THEN +- RETURN + END IF +-* +-* Quick return if possible +-* ++ IF( LWORK.EQ.LQUERV ) ++ $ RETURN + IF( N.EQ.0 ) + $ RETURN + * +diff -uNr LAPACK.orig/SRC/zgghrd.f LAPACK/SRC/zgghrd.f +--- LAPACK.orig/SRC/zgghrd.f Thu Nov 4 14:25:45 1999 ++++ LAPACK/SRC/zgghrd.f Fri May 25 16:11:59 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ +@@ -20,16 +20,29 @@ + * + * ZGGHRD reduces a pair of complex matrices (A,B) to generalized upper + * Hessenberg form using unitary transformations, where A is a +-* general matrix and B is upper triangular: Q' * A * Z = H and +-* Q' * B * Z = T, where H is upper Hessenberg, T is upper triangular, +-* and Q and Z are unitary, and ' means conjugate transpose. ++* general matrix and B is upper triangular. The form of the ++* generalized eigenvalue problem is ++* A*x = lambda*B*x, ++* and B is typically made upper triangular by computing its QR ++* factorization and moving the unitary matrix Q to the left side ++* of the equation. ++* ++* This subroutine simultaneously reduces A to a Hessenberg matrix H: ++* Q**H*A*Z = H ++* and transforms B to another upper triangular matrix T: ++* Q**H*B*Z = T ++* in order to reduce the problem to its standard form ++* H*y = lambda*T*y ++* where y = Z**H*x. + * + * The unitary matrices Q and Z are determined as products of Givens + * rotations. They may either be formed explicitly, or they may be + * postmultiplied into input matrices Q1 and Z1, so that +-* +-* Q1 * A * Z1' = (Q1*Q) * H * (Z1*Z)' +-* Q1 * B * Z1' = (Q1*Q) * T * (Z1*Z)' ++* Q1 * A * Z1**H = (Q1*Q) * H * (Z1*Z)**H ++* Q1 * B * Z1**H = (Q1*Q) * T * (Z1*Z)**H ++* If Q1 is the unitary matrix from the QR factorization of B in the ++* original equation A*x = lambda*B*x, then ZGGHRD reduces the original ++* problem to generalized Hessenberg form. + * + * Arguments + * ========= +@@ -53,10 +66,11 @@ + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set +-* by a previous call to ZGGBAL; otherwise they should be set +-* to 1 and N respectively. 
++* ILO and IHI mark the rows and columns of A which are to be ++* reduced. It is assumed that A is already upper triangular ++* in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are ++* normally set by a previous call to ZGGBAL; otherwise they ++* should be set to 1 and N respectively. + * 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. + * + * A (input/output) COMPLEX*16 array, dimension (LDA, N) +@@ -70,33 +84,28 @@ + * + * B (input/output) COMPLEX*16 array, dimension (LDB, N) + * On entry, the N-by-N upper triangular matrix B. +-* On exit, the upper triangular matrix T = Q' B Z. The ++* On exit, the upper triangular matrix T = Q**H B Z. The + * elements below the diagonal are set to zero. + * + * LDB (input) INTEGER + * The leading dimension of the array B. LDB >= max(1,N). + * + * Q (input/output) COMPLEX*16 array, dimension (LDQ, N) +-* If COMPQ='N': Q is not referenced. +-* If COMPQ='I': on entry, Q need not be set, and on exit it +-* contains the unitary matrix Q, where Q' +-* is the product of the Givens transformations +-* which are applied to A and B on the left. +-* If COMPQ='V': on entry, Q must contain a unitary matrix +-* Q1, and on exit this is overwritten by Q1*Q. ++* On entry, if COMPQ = 'V', the unitary matrix Q1, typically ++* from the QR factorization of B. ++* On exit, if COMPQ='I', the unitary matrix Q, and if ++* COMPQ = 'V', the product Q1*Q. ++* Not referenced if COMPQ='N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. + * LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. + * + * Z (input/output) COMPLEX*16 array, dimension (LDZ, N) +-* If COMPZ='N': Z is not referenced. +-* If COMPZ='I': on entry, Z need not be set, and on exit it +-* contains the unitary matrix Z, which is +-* the product of the Givens transformations +-* which are applied to A and B on the right. +-* If COMPZ='V': on entry, Z must contain a unitary matrix +-* Z1, and on exit this is overwritten by Z1*Z. 
++* On entry, if COMPZ = 'V', the unitary matrix Z1. ++* On exit, if COMPZ='I', the unitary matrix Z, and if ++* COMPZ = 'V', the product Z1*Z. ++* Not referenced if COMPZ='N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. +diff -uNr LAPACK.orig/SRC/zhbgst.f LAPACK/SRC/zhbgst.f +--- LAPACK.orig/SRC/zhbgst.f Thu Nov 4 14:23:32 1999 ++++ LAPACK/SRC/zhbgst.f Fri May 25 16:13:00 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* January 9, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO, VECT +@@ -131,7 +131,7 @@ + INFO = -3 + ELSE IF( KA.LT.0 ) THEN + INFO = -4 +- ELSE IF( KB.LT.0 ) THEN ++ ELSE IF( KB.LT.0 .OR. KB.GT.KA ) THEN + INFO = -5 + ELSE IF( LDAB.LT.KA+1 ) THEN + INFO = -7 +diff -uNr LAPACK.orig/SRC/zhgeqz.f LAPACK/SRC/zhgeqz.f +--- LAPACK.orig/SRC/zhgeqz.f Thu Nov 4 14:25:05 1999 ++++ LAPACK/SRC/zhgeqz.f Fri May 25 16:12:21 2001 +@@ -1,43 +1,64 @@ +- SUBROUTINE ZHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, A, LDA, B, LDB, ++ SUBROUTINE ZHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, H, LDH, T, LDT, + $ ALPHA, BETA, Q, LDQ, Z, LDZ, WORK, LWORK, + $ RWORK, INFO ) + * + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ, JOB +- INTEGER IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, LWORK, N ++ INTEGER IHI, ILO, INFO, LDH, LDQ, LDT, LDZ, LWORK, N + * .. + * .. Array Arguments .. + DOUBLE PRECISION RWORK( * ) +- COMPLEX*16 A( LDA, * ), ALPHA( * ), B( LDB, * ), +- $ BETA( * ), Q( LDQ, * ), WORK( * ), Z( LDZ, * ) ++ COMPLEX*16 ALPHA( * ), BETA( * ), H( LDH, * ), ++ $ Q( LDQ, * ), T( LDT, * ), WORK( * ), ++ $ Z( LDZ, * ) + * .. 
+ * + * Purpose + * ======= + * +-* ZHGEQZ implements a single-shift version of the QZ +-* method for finding the generalized eigenvalues w(i)=ALPHA(i)/BETA(i) +-* of the equation +-* +-* det( A - w(i) B ) = 0 +-* +-* If JOB='S', then the pair (A,B) is simultaneously +-* reduced to Schur form (i.e., A and B are both upper triangular) by +-* applying one unitary tranformation (usually called Q) on the left and +-* another (usually called Z) on the right. The diagonal elements of +-* A are then ALPHA(1),...,ALPHA(N), and of B are BETA(1),...,BETA(N). +-* +-* If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the unitary +-* transformations used to reduce (A,B) are accumulated into the arrays +-* Q and Z s.t.: +-* +-* Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)* +-* Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)* ++* ZHGEQZ computes the eigenvalues of a complex matrix pair (H,T), ++* where H is an upper Hessenberg matrix and T is upper triangular, ++* using the single-shift QZ method. ++* Matrix pairs of this type are produced by the reduction to ++* generalized upper Hessenberg form of a complex matrix pair (A,B): ++* ++* A = Q1*H*Z1**H, B = Q1*T*Z1**H, ++* ++* as computed by ZGGHRD. ++* ++* If JOB='S', then the Hessenberg-triangular pair (H,T) is ++* also reduced to generalized Schur form, ++* ++* H = Q*S*Z**H, T = Q*P*Z**H, ++* ++* where Q and Z are unitary matrices and S and P are upper triangular. ++* ++* Optionally, the unitary matrix Q from the generalized Schur ++* factorization may be postmultiplied into an input matrix Q1, and the ++* unitary matrix Z may be postmultiplied into an input matrix Z1. ++* If Q1 and Z1 are the unitary matrices from ZGGHRD that reduced ++* the matrix pair (A,B) to generalized Hessenberg form, then the output ++* matrices Q1*Q and Z1*Z are the unitary factors from the generalized ++* Schur factorization of (A,B): ++* ++* A = (Q1*Q)*S*(Z1*Z)**H, B = (Q1*Q)*P*(Z1*Z)**H. 
++* ++* To avoid overflow, eigenvalues of the matrix pair (H,T) ++* (equivalently, of (A,B)) are computed as a pair of complex values ++* (alpha,beta). If beta is nonzero, lambda = alpha / beta is an ++* eigenvalue of the generalized nonsymmetric eigenvalue problem (GNEP) ++* A*x = lambda*B*x ++* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the ++* alternate form of the GNEP ++* mu*A*y = B*y. ++* The values of alpha and beta for the i-th eigenvalue can be read ++* directly from the generalized Schur form: alpha = S(i,i), ++* beta = P(i,i). + * + * Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix + * Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), +@@ -47,83 +68,88 @@ + * ========= + * + * JOB (input) CHARACTER*1 +-* = 'E': compute only ALPHA and BETA. A and B will not +-* necessarily be put into generalized Schur form. +-* = 'S': put A and B into generalized Schur form, as well +-* as computing ALPHA and BETA. ++* = 'E': Compute eigenvalues only; ++* = 'S': Computer eigenvalues and the Schur form. + * + * COMPQ (input) CHARACTER*1 +-* = 'N': do not modify Q. +-* = 'V': multiply the array Q on the right by the conjugate +-* transpose of the unitary tranformation that is +-* applied to the left side of A and B to reduce them +-* to Schur form. +-* = 'I': like COMPQ='V', except that Q will be initialized to +-* the identity first. ++* = 'N': Left Schur vectors (Q) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Q ++* of left Schur vectors of (H,T) is returned; ++* = 'V': Q must contain a unitary matrix Q1 on entry and ++* the product Q1*Q is returned. + * + * COMPZ (input) CHARACTER*1 +-* = 'N': do not modify Z. +-* = 'V': multiply the array Z on the right by the unitary +-* tranformation that is applied to the right side of +-* A and B to reduce them to Schur form. +-* = 'I': like COMPZ='V', except that Z will be initialized to +-* the identity first. 
++* = 'N': Right Schur vectors (Z) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Z ++* of right Schur vectors of (H,T) is returned; ++* = 'V': Z must contain a unitary matrix Z1 on entry and ++* the product Z1*Z is returned. + * + * N (input) INTEGER +-* The order of the matrices A, B, Q, and Z. N >= 0. ++* The order of the matrices H, T, Q, and Z. N >= 0. + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. +-* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. +-* +-* A (input/output) COMPLEX*16 array, dimension (LDA, N) +-* On entry, the N-by-N upper Hessenberg matrix A. Elements +-* below the subdiagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to upper triangular form. +-* If JOB='E', then on exit A will have been destroyed. +-* +-* LDA (input) INTEGER +-* The leading dimension of the array A. LDA >= max( 1, N ). +-* +-* B (input/output) COMPLEX*16 array, dimension (LDB, N) +-* On entry, the N-by-N upper triangular matrix B. Elements +-* below the diagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to upper triangular form. +-* If JOB='E', then on exit B will have been destroyed. ++* ILO and IHI mark the rows and columns of H which are in ++* Hessenberg form. It is assumed that A is already upper ++* triangular in rows and columns 1:ILO-1 and IHI+1:N. ++* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. ++* ++* H (input/output) COMPLEX*16 array, dimension (LDH, N) ++* On entry, the N-by-N upper Hessenberg matrix H. ++* On exit, if JOB = 'S', H contains the upper triangular ++* matrix S from the generalized Schur factorization. ++* If JOB = 'E', the diagonal of H matches that of S, but ++* the rest of H is unspecified. ++* ++* LDH (input) INTEGER ++* The leading dimension of the array H. LDH >= max( 1, N ). 
++* ++* T (input/output) COMPLEX*16 array, dimension (LDT, N) ++* On entry, the N-by-N upper triangular matrix T. ++* On exit, if JOB = 'S', T contains the upper triangular ++* matrix P from the generalized Schur factorization. ++* If JOB = 'E', the diagonal of T matches that of P, but ++* the rest of T is unspecified. + * +-* LDB (input) INTEGER +-* The leading dimension of the array B. LDB >= max( 1, N ). ++* LDT (input) INTEGER ++* The leading dimension of the array T. LDT >= max( 1, N ). + * + * ALPHA (output) COMPLEX*16 array, dimension (N) +-* The diagonal elements of A when the pair (A,B) has been +-* reduced to Schur form. ALPHA(i)/BETA(i) i=1,...,N +-* are the generalized eigenvalues. ++* The complex scalars alpha that define the eigenvalues of ++* GNEP. ALPHA(i) = S(i,i) in the generalized Schur ++* factorization. + * + * BETA (output) COMPLEX*16 array, dimension (N) +-* The diagonal elements of B when the pair (A,B) has been +-* reduced to Schur form. ALPHA(i)/BETA(i) i=1,...,N +-* are the generalized eigenvalues. A and B are normalized +-* so that BETA(1),...,BETA(N) are non-negative real numbers. ++* The real non-negative scalars beta that define the ++* eigenvalues of GNEP. BETA(i) = P(i,i) in the generalized ++* Schur factorization. ++* ++* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) ++* represent the j-th eigenvalue of the matrix pair (A,B), in ++* one of the forms lambda = alpha/beta or mu = beta/alpha. ++* Since either lambda or mu may overflow, they should not, ++* in general, be computed. + * + * Q (input/output) COMPLEX*16 array, dimension (LDQ, N) +-* If COMPQ='N', then Q will not be referenced. +-* If COMPQ='V' or 'I', then the conjugate transpose of the +-* unitary transformations which are applied to A and B on +-* the left will be applied to the array Q on the right. ++* On entry, if COMPZ = 'V', the unitary matrix Q1 used in the ++* reduction of (A,B) to generalized Hessenberg form. 
++* On exit, if COMPZ = 'I', the unitary matrix of left Schur ++* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of ++* left Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. LDQ >= 1. + * If COMPQ='V' or 'I', then LDQ >= N. + * + * Z (input/output) COMPLEX*16 array, dimension (LDZ, N) +-* If COMPZ='N', then Z will not be referenced. +-* If COMPZ='V' or 'I', then the unitary transformations which +-* are applied to A and B on the right will be applied to the +-* array Z on the right. ++* On entry, if COMPZ = 'V', the unitary matrix Z1 used in the ++* reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the unitary matrix of right Schur ++* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of ++* right Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. LDZ >= 1. +@@ -145,13 +171,12 @@ + * INFO (output) INTEGER + * = 0: successful exit + * < 0: if INFO = -i, the i-th argument had an illegal value +-* = 1,...,N: the QZ iteration did not converge. (A,B) is not ++* = 1,...,N: the QZ iteration did not converge. (H,T) is not + * in Schur form, but ALPHA(i) and BETA(i), + * i=INFO+1,...,N should be correct. +-* = N+1,...,2*N: the shift calculation failed. (A,B) is not ++* = N+1,...,2*N: the shift calculation failed. (H,T) is not + * in Schur form, but ALPHA(i) and BETA(i), + * i=INFO-N+1,...,N should be correct. +-* > 2*N: various "impossible" errors. + * + * Further Details + * =============== +@@ -178,7 +203,7 @@ + DOUBLE PRECISION ABSB, ANORM, ASCALE, ATOL, BNORM, BSCALE, BTOL, + $ C, SAFMIN, TEMP, TEMP2, TEMPR, ULP + COMPLEX*16 ABI22, AD11, AD12, AD21, AD22, CTEMP, CTEMP2, +- $ CTEMP3, ESHIFT, RTDISC, S, SHIFT, SIGNBC, T, ++ $ CTEMP3, ESHIFT, RTDISC, S, SHIFT, SIGNBC, T1, + $ U12, X + * .. + * .. External Functions .. 
+@@ -256,9 +281,9 @@ + INFO = -5 + ELSE IF( IHI.GT.N .OR. IHI.LT.ILO-1 ) THEN + INFO = -6 +- ELSE IF( LDA.LT.N ) THEN ++ ELSE IF( LDH.LT.N ) THEN + INFO = -8 +- ELSE IF( LDB.LT.N ) THEN ++ ELSE IF( LDT.LT.N ) THEN + INFO = -10 + ELSE IF( LDQ.LT.1 .OR. ( ILQ .AND. LDQ.LT.N ) ) THEN + INFO = -14 +@@ -294,8 +319,8 @@ + IN = IHI + 1 - ILO + SAFMIN = DLAMCH( 'S' ) + ULP = DLAMCH( 'E' )*DLAMCH( 'B' ) +- ANORM = ZLANHS( 'F', IN, A( ILO, ILO ), LDA, RWORK ) +- BNORM = ZLANHS( 'F', IN, B( ILO, ILO ), LDB, RWORK ) ++ ANORM = ZLANHS( 'F', IN, H( ILO, ILO ), LDH, RWORK ) ++ BNORM = ZLANHS( 'F', IN, T( ILO, ILO ), LDT, RWORK ) + ATOL = MAX( SAFMIN, ULP*ANORM ) + BTOL = MAX( SAFMIN, ULP*BNORM ) + ASCALE = ONE / MAX( SAFMIN, ANORM ) +@@ -305,23 +330,23 @@ + * Set Eigenvalues IHI+1:N + * + DO 10 J = IHI + 1, N +- ABSB = ABS( B( J, J ) ) ++ ABSB = ABS( T( J, J ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = DCONJG( B( J, J ) / ABSB ) +- B( J, J ) = ABSB ++ SIGNBC = DCONJG( T( J, J ) / ABSB ) ++ T( J, J ) = ABSB + IF( ILSCHR ) THEN +- CALL ZSCAL( J-1, SIGNBC, B( 1, J ), 1 ) +- CALL ZSCAL( J, SIGNBC, A( 1, J ), 1 ) ++ CALL ZSCAL( J-1, SIGNBC, T( 1, J ), 1 ) ++ CALL ZSCAL( J, SIGNBC, H( 1, J ), 1 ) + ELSE +- A( J, J ) = A( J, J )*SIGNBC ++ H( J, J ) = H( J, J )*SIGNBC + END IF + IF( ILZ ) + $ CALL ZSCAL( N, SIGNBC, Z( 1, J ), 1 ) + ELSE +- B( J, J ) = CZERO ++ T( J, J ) = CZERO + END IF +- ALPHA( J ) = A( J, J ) +- BETA( J ) = B( J, J ) ++ ALPHA( J ) = H( J, J ) ++ BETA( J ) = T( J, J ) + 10 CONTINUE + * + * If IHI < ILO, skip QZ steps +@@ -366,22 +391,22 @@ + * Split the matrix if possible. 
+ * + * Two tests: +-* 1: A(j,j-1)=0 or j=ILO +-* 2: B(j,j)=0 ++* 1: H(j,j-1)=0 or j=ILO ++* 2: T(j,j)=0 + * + * Special case: j=ILAST + * + IF( ILAST.EQ.ILO ) THEN + GO TO 60 + ELSE +- IF( ABS1( A( ILAST, ILAST-1 ) ).LE.ATOL ) THEN +- A( ILAST, ILAST-1 ) = CZERO ++ IF( ABS1( H( ILAST, ILAST-1 ) ).LE.ATOL ) THEN ++ H( ILAST, ILAST-1 ) = CZERO + GO TO 60 + END IF + END IF + * +- IF( ABS( B( ILAST, ILAST ) ).LE.BTOL ) THEN +- B( ILAST, ILAST ) = CZERO ++ IF( ABS( T( ILAST, ILAST ) ).LE.BTOL ) THEN ++ T( ILAST, ILAST ) = CZERO + GO TO 50 + END IF + * +@@ -389,30 +414,30 @@ + * + DO 40 J = ILAST - 1, ILO, -1 + * +-* Test 1: for A(j,j-1)=0 or j=ILO ++* Test 1: for H(j,j-1)=0 or j=ILO + * + IF( J.EQ.ILO ) THEN + ILAZRO = .TRUE. + ELSE +- IF( ABS1( A( J, J-1 ) ).LE.ATOL ) THEN +- A( J, J-1 ) = CZERO ++ IF( ABS1( H( J, J-1 ) ).LE.ATOL ) THEN ++ H( J, J-1 ) = CZERO + ILAZRO = .TRUE. + ELSE + ILAZRO = .FALSE. + END IF + END IF + * +-* Test 2: for B(j,j)=0 ++* Test 2: for T(j,j)=0 + * +- IF( ABS( B( J, J ) ).LT.BTOL ) THEN +- B( J, J ) = CZERO ++ IF( ABS( T( J, J ) ).LT.BTOL ) THEN ++ T( J, J ) = CZERO + * + * Test 1a: Check for 2 consecutive small subdiagonals in A + * + ILAZR2 = .FALSE. + IF( .NOT.ILAZRO ) THEN +- IF( ABS1( A( J, J-1 ) )*( ASCALE*ABS1( A( J+1, +- $ J ) ) ).LE.ABS1( A( J, J ) )*( ASCALE*ATOL ) ) ++ IF( ABS1( H( J, J-1 ) )*( ASCALE*ABS1( H( J+1, ++ $ J ) ) ).LE.ABS1( H( J, J ) )*( ASCALE*ATOL ) ) + $ ILAZR2 = .TRUE. + END IF + * +@@ -424,21 +449,21 @@ + * + IF( ILAZRO .OR. 
ILAZR2 ) THEN + DO 20 JCH = J, ILAST - 1 +- CTEMP = A( JCH, JCH ) +- CALL ZLARTG( CTEMP, A( JCH+1, JCH ), C, S, +- $ A( JCH, JCH ) ) +- A( JCH+1, JCH ) = CZERO +- CALL ZROT( ILASTM-JCH, A( JCH, JCH+1 ), LDA, +- $ A( JCH+1, JCH+1 ), LDA, C, S ) +- CALL ZROT( ILASTM-JCH, B( JCH, JCH+1 ), LDB, +- $ B( JCH+1, JCH+1 ), LDB, C, S ) ++ CTEMP = H( JCH, JCH ) ++ CALL ZLARTG( CTEMP, H( JCH+1, JCH ), C, S, ++ $ H( JCH, JCH ) ) ++ H( JCH+1, JCH ) = CZERO ++ CALL ZROT( ILASTM-JCH, H( JCH, JCH+1 ), LDH, ++ $ H( JCH+1, JCH+1 ), LDH, C, S ) ++ CALL ZROT( ILASTM-JCH, T( JCH, JCH+1 ), LDT, ++ $ T( JCH+1, JCH+1 ), LDT, C, S ) + IF( ILQ ) + $ CALL ZROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, DCONJG( S ) ) + IF( ILAZR2 ) +- $ A( JCH, JCH-1 ) = A( JCH, JCH-1 )*C ++ $ H( JCH, JCH-1 ) = H( JCH, JCH-1 )*C + ILAZR2 = .FALSE. +- IF( ABS1( B( JCH+1, JCH+1 ) ).GE.BTOL ) THEN ++ IF( ABS1( T( JCH+1, JCH+1 ) ).GE.BTOL ) THEN + IF( JCH+1.GE.ILAST ) THEN + GO TO 60 + ELSE +@@ -446,35 +471,35 @@ + GO TO 70 + END IF + END IF +- B( JCH+1, JCH+1 ) = CZERO ++ T( JCH+1, JCH+1 ) = CZERO + 20 CONTINUE + GO TO 50 + ELSE + * +-* Only test 2 passed -- chase the zero to B(ILAST,ILAST) +-* Then process as in the case B(ILAST,ILAST)=0 ++* Only test 2 passed -- chase the zero to T(ILAST,ILAST) ++* Then process as in the case T(ILAST,ILAST)=0 + * + DO 30 JCH = J, ILAST - 1 +- CTEMP = B( JCH, JCH+1 ) +- CALL ZLARTG( CTEMP, B( JCH+1, JCH+1 ), C, S, +- $ B( JCH, JCH+1 ) ) +- B( JCH+1, JCH+1 ) = CZERO ++ CTEMP = T( JCH, JCH+1 ) ++ CALL ZLARTG( CTEMP, T( JCH+1, JCH+1 ), C, S, ++ $ T( JCH, JCH+1 ) ) ++ T( JCH+1, JCH+1 ) = CZERO + IF( JCH.LT.ILASTM-1 ) +- $ CALL ZROT( ILASTM-JCH-1, B( JCH, JCH+2 ), LDB, +- $ B( JCH+1, JCH+2 ), LDB, C, S ) +- CALL ZROT( ILASTM-JCH+2, A( JCH, JCH-1 ), LDA, +- $ A( JCH+1, JCH-1 ), LDA, C, S ) ++ $ CALL ZROT( ILASTM-JCH-1, T( JCH, JCH+2 ), LDT, ++ $ T( JCH+1, JCH+2 ), LDT, C, S ) ++ CALL ZROT( ILASTM-JCH+2, H( JCH, JCH-1 ), LDH, ++ $ H( JCH+1, JCH-1 ), LDH, C, S ) + IF( ILQ ) + $ CALL 
ZROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, DCONJG( S ) ) +- CTEMP = A( JCH+1, JCH ) +- CALL ZLARTG( CTEMP, A( JCH+1, JCH-1 ), C, S, +- $ A( JCH+1, JCH ) ) +- A( JCH+1, JCH-1 ) = CZERO +- CALL ZROT( JCH+1-IFRSTM, A( IFRSTM, JCH ), 1, +- $ A( IFRSTM, JCH-1 ), 1, C, S ) +- CALL ZROT( JCH-IFRSTM, B( IFRSTM, JCH ), 1, +- $ B( IFRSTM, JCH-1 ), 1, C, S ) ++ CTEMP = H( JCH+1, JCH ) ++ CALL ZLARTG( CTEMP, H( JCH+1, JCH-1 ), C, S, ++ $ H( JCH+1, JCH ) ) ++ H( JCH+1, JCH-1 ) = CZERO ++ CALL ZROT( JCH+1-IFRSTM, H( IFRSTM, JCH ), 1, ++ $ H( IFRSTM, JCH-1 ), 1, C, S ) ++ CALL ZROT( JCH-IFRSTM, T( IFRSTM, JCH ), 1, ++ $ T( IFRSTM, JCH-1 ), 1, C, S ) + IF( ILZ ) + $ CALL ZROT( N, Z( 1, JCH ), 1, Z( 1, JCH-1 ), 1, + $ C, S ) +@@ -498,42 +523,42 @@ + INFO = 2*N + 1 + GO TO 210 + * +-* B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a ++* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a + * 1x1 block. + * + 50 CONTINUE +- CTEMP = A( ILAST, ILAST ) +- CALL ZLARTG( CTEMP, A( ILAST, ILAST-1 ), C, S, +- $ A( ILAST, ILAST ) ) +- A( ILAST, ILAST-1 ) = CZERO +- CALL ZROT( ILAST-IFRSTM, A( IFRSTM, ILAST ), 1, +- $ A( IFRSTM, ILAST-1 ), 1, C, S ) +- CALL ZROT( ILAST-IFRSTM, B( IFRSTM, ILAST ), 1, +- $ B( IFRSTM, ILAST-1 ), 1, C, S ) ++ CTEMP = H( ILAST, ILAST ) ++ CALL ZLARTG( CTEMP, H( ILAST, ILAST-1 ), C, S, ++ $ H( ILAST, ILAST ) ) ++ H( ILAST, ILAST-1 ) = CZERO ++ CALL ZROT( ILAST-IFRSTM, H( IFRSTM, ILAST ), 1, ++ $ H( IFRSTM, ILAST-1 ), 1, C, S ) ++ CALL ZROT( ILAST-IFRSTM, T( IFRSTM, ILAST ), 1, ++ $ T( IFRSTM, ILAST-1 ), 1, C, S ) + IF( ILZ ) + $ CALL ZROT( N, Z( 1, ILAST ), 1, Z( 1, ILAST-1 ), 1, C, S ) + * +-* A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA ++* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA + * + 60 CONTINUE +- ABSB = ABS( B( ILAST, ILAST ) ) ++ ABSB = ABS( T( ILAST, ILAST ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = DCONJG( B( ILAST, ILAST ) / ABSB ) +- B( ILAST, ILAST ) = ABSB ++ SIGNBC = DCONJG( T( ILAST, ILAST ) / ABSB ) ++ 
T( ILAST, ILAST ) = ABSB + IF( ILSCHR ) THEN +- CALL ZSCAL( ILAST-IFRSTM, SIGNBC, B( IFRSTM, ILAST ), 1 ) +- CALL ZSCAL( ILAST+1-IFRSTM, SIGNBC, A( IFRSTM, ILAST ), ++ CALL ZSCAL( ILAST-IFRSTM, SIGNBC, T( IFRSTM, ILAST ), 1 ) ++ CALL ZSCAL( ILAST+1-IFRSTM, SIGNBC, H( IFRSTM, ILAST ), + $ 1 ) + ELSE +- A( ILAST, ILAST ) = A( ILAST, ILAST )*SIGNBC ++ H( ILAST, ILAST ) = H( ILAST, ILAST )*SIGNBC + END IF + IF( ILZ ) + $ CALL ZSCAL( N, SIGNBC, Z( 1, ILAST ), 1 ) + ELSE +- B( ILAST, ILAST ) = CZERO ++ T( ILAST, ILAST ) = CZERO + END IF +- ALPHA( ILAST ) = A( ILAST, ILAST ) +- BETA( ILAST ) = B( ILAST, ILAST ) ++ ALPHA( ILAST ) = H( ILAST, ILAST ) ++ BETA( ILAST ) = T( ILAST, ILAST ) + * + * Go to next block -- exit if finished. + * +@@ -566,7 +591,7 @@ + * Compute the Shift. + * + * At this point, IFIRST < ILAST, and the diagonal elements of +-* B(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in ++* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in + * magnitude) + * + IF( ( IITER / 10 )*10.NE.IITER ) THEN +@@ -578,33 +603,33 @@ + * We factor B as U*D, where U has unit diagonals, and + * compute (A*inv(D))*inv(U). 
+ * +- U12 = ( BSCALE*B( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- AD11 = ( ASCALE*A( ILAST-1, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD21 = ( ASCALE*A( ILAST, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD12 = ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- AD22 = ( ASCALE*A( ILAST, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) ++ U12 = ( BSCALE*T( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD11 = ( ASCALE*H( ILAST-1, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD21 = ( ASCALE*H( ILAST, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD12 = ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD22 = ( ASCALE*H( ILAST, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) + ABI22 = AD22 - U12*AD21 + * +- T = HALF*( AD11+ABI22 ) +- RTDISC = SQRT( T**2+AD12*AD21-AD11*AD22 ) +- TEMP = DBLE( T-ABI22 )*DBLE( RTDISC ) + +- $ DIMAG( T-ABI22 )*DIMAG( RTDISC ) ++ T1 = HALF*( AD11+ABI22 ) ++ RTDISC = SQRT( T1**2+AD12*AD21-AD11*AD22 ) ++ TEMP = DBLE( T1-ABI22 )*DBLE( RTDISC ) + ++ $ DIMAG( T1-ABI22 )*DIMAG( RTDISC ) + IF( TEMP.LE.ZERO ) THEN +- SHIFT = T + RTDISC ++ SHIFT = T1 + RTDISC + ELSE +- SHIFT = T - RTDISC ++ SHIFT = T1 - RTDISC + END IF + ELSE + * + * Exceptional shift. Chosen for no particularly good reason. + * +- ESHIFT = ESHIFT + DCONJG( ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) ) ++ ESHIFT = ESHIFT + DCONJG( ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ) + SHIFT = ESHIFT + END IF + * +@@ -612,46 +637,46 @@ + * + DO 80 J = ILAST - 1, IFIRST + 1, -1 + ISTART = J +- CTEMP = ASCALE*A( J, J ) - SHIFT*( BSCALE*B( J, J ) ) ++ CTEMP = ASCALE*H( J, J ) - SHIFT*( BSCALE*T( J, J ) ) + TEMP = ABS1( CTEMP ) +- TEMP2 = ASCALE*ABS1( A( J+1, J ) ) ++ TEMP2 = ASCALE*ABS1( H( J+1, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. 
TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( ABS1( A( J, J-1 ) )*TEMP2.LE.TEMP*ATOL ) ++ IF( ABS1( H( J, J-1 ) )*TEMP2.LE.TEMP*ATOL ) + $ GO TO 90 + 80 CONTINUE + * + ISTART = IFIRST +- CTEMP = ASCALE*A( IFIRST, IFIRST ) - +- $ SHIFT*( BSCALE*B( IFIRST, IFIRST ) ) ++ CTEMP = ASCALE*H( IFIRST, IFIRST ) - ++ $ SHIFT*( BSCALE*T( IFIRST, IFIRST ) ) + 90 CONTINUE + * + * Do an implicit-shift QZ sweep. + * + * Initial Q + * +- CTEMP2 = ASCALE*A( ISTART+1, ISTART ) ++ CTEMP2 = ASCALE*H( ISTART+1, ISTART ) + CALL ZLARTG( CTEMP, CTEMP2, C, S, CTEMP3 ) + * + * Sweep + * + DO 150 J = ISTART, ILAST - 1 + IF( J.GT.ISTART ) THEN +- CTEMP = A( J, J-1 ) +- CALL ZLARTG( CTEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = CZERO ++ CTEMP = H( J, J-1 ) ++ CALL ZLARTG( CTEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = CZERO + END IF + * + DO 100 JC = J, ILASTM +- CTEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -DCONJG( S )*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = CTEMP +- CTEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, JC ) = -DCONJG( S )*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = CTEMP2 ++ CTEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -DCONJG( S )*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = CTEMP ++ CTEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -DCONJG( S )*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = CTEMP2 + 100 CONTINUE + IF( ILQ ) THEN + DO 110 JR = 1, N +@@ -661,19 +686,19 @@ + 110 CONTINUE + END IF + * +- CTEMP = B( J+1, J+1 ) +- CALL ZLARTG( CTEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = CZERO ++ CTEMP = T( J+1, J+1 ) ++ CALL ZLARTG( CTEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = CZERO + * + DO 120 JR = IFRSTM, MIN( J+2, ILAST ) +- CTEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -DCONJG( S )*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = CTEMP ++ CTEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -DCONJG( S )*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = 
CTEMP + 120 CONTINUE + DO 130 JR = IFRSTM, J +- CTEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -DCONJG( S )*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = CTEMP ++ CTEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -DCONJG( S )*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = CTEMP + 130 CONTINUE + IF( ILZ ) THEN + DO 140 JR = 1, N +@@ -701,23 +726,23 @@ + * Set Eigenvalues 1:ILO-1 + * + DO 200 J = 1, ILO - 1 +- ABSB = ABS( B( J, J ) ) ++ ABSB = ABS( T( J, J ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = DCONJG( B( J, J ) / ABSB ) +- B( J, J ) = ABSB ++ SIGNBC = DCONJG( T( J, J ) / ABSB ) ++ T( J, J ) = ABSB + IF( ILSCHR ) THEN +- CALL ZSCAL( J-1, SIGNBC, B( 1, J ), 1 ) +- CALL ZSCAL( J, SIGNBC, A( 1, J ), 1 ) ++ CALL ZSCAL( J-1, SIGNBC, T( 1, J ), 1 ) ++ CALL ZSCAL( J, SIGNBC, H( 1, J ), 1 ) + ELSE +- A( J, J ) = A( J, J )*SIGNBC ++ H( J, J ) = H( J, J )*SIGNBC + END IF + IF( ILZ ) + $ CALL ZSCAL( N, SIGNBC, Z( 1, J ), 1 ) + ELSE +- B( J, J ) = CZERO ++ T( J, J ) = CZERO + END IF +- ALPHA( J ) = A( J, J ) +- BETA( J ) = B( J, J ) ++ ALPHA( J ) = H( J, J ) ++ BETA( J ) = T( J, J ) + 200 CONTINUE + * + * Normal Termination +diff -uNr LAPACK.orig/SRC/zlasr.f LAPACK/SRC/zlasr.f +--- LAPACK.orig/SRC/zlasr.f Thu Nov 4 14:25:06 1999 ++++ LAPACK/SRC/zlasr.f Fri May 25 16:12:41 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK auxiliary routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1992 ++* May 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER DIRECT, PIVOT, SIDE +@@ -17,42 +17,77 @@ + * Purpose + * ======= + * +-* ZLASR performs the transformation ++* ZLASR applies a sequence of real plane rotations to a complex matrix ++* A, from either the left or the right. 
+ * +-* A := P*A, when SIDE = 'L' or 'l' ( Left-hand side ) ++* When SIDE = 'L', the transformation takes the form + * +-* A := A*P', when SIDE = 'R' or 'r' ( Right-hand side ) ++* A := P*A + * +-* where A is an m by n complex matrix and P is an orthogonal matrix, +-* consisting of a sequence of plane rotations determined by the +-* parameters PIVOT and DIRECT as follows ( z = m when SIDE = 'L' or 'l' +-* and z = n when SIDE = 'R' or 'r' ): ++* and when SIDE = 'R', the transformation takes the form + * +-* When DIRECT = 'F' or 'f' ( Forward sequence ) then +-* +-* P = P( z - 1 )*...*P( 2 )*P( 1 ), +-* +-* and when DIRECT = 'B' or 'b' ( Backward sequence ) then +-* +-* P = P( 1 )*P( 2 )*...*P( z - 1 ), +-* +-* where P( k ) is a plane rotation matrix for the following planes: +-* +-* when PIVOT = 'V' or 'v' ( Variable pivot ), +-* the plane ( k, k + 1 ) +-* +-* when PIVOT = 'T' or 't' ( Top pivot ), +-* the plane ( 1, k + 1 ) +-* +-* when PIVOT = 'B' or 'b' ( Bottom pivot ), +-* the plane ( k, z ) +-* +-* c( k ) and s( k ) must contain the cosine and sine that define the +-* matrix P( k ). The two by two plane rotation part of the matrix +-* P( k ), R( k ), is assumed to be of the form +-* +-* R( k ) = ( c( k ) s( k ) ). +-* ( -s( k ) c( k ) ) ++* A := A*P**T ++* ++* where P is an orthogonal matrix consisting of a sequence of z plane ++* rotations, with z = M when SIDE = 'L' and z = N when SIDE = 'R', ++* and P**T is the transpose of P. ++* ++* When DIRECT = 'F' (Forward sequence), then ++* ++* P = P(z-1) * ... * P(2) * P(1) ++* ++* and when DIRECT = 'B' (Backward sequence), then ++* ++* P = P(1) * P(2) * ... * P(z-1) ++* ++* where P(k) is a plane rotation matrix defined by the 2-by-2 rotation ++* ++* R(k) = ( c(k) s(k) ) ++* = ( -s(k) c(k) ). ++* ++* When PIVOT = 'V' (Variable pivot), the rotation is performed ++* for the plane (k,k+1), i.e., P(k) has the form ++* ++* P(k) = ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( c(k) s(k) ) ++* ( -s(k) c(k) ) ++* ( 1 ) ++* ( ... 
) ++* ( 1 ) ++* ++* where R(k) appears as a rank-2 modification to the identity matrix in ++* rows and columns k and k+1. ++* ++* When PIVOT = 'T' (Top pivot), the rotation is performed for the ++* plane (1,k+1), so P(k) has the form ++* ++* P(k) = ( c(k) s(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( -s(k) c(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ++* where R(k) appears in rows and columns 1 and k+1. ++* ++* Similarly, when PIVOT = 'B' (Bottom pivot), the rotation is ++* performed for the plane (k,z), giving P(k) the form ++* ++* P(k) = ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( c(k) s(k) ) ++* ( 1 ) ++* ( ... ) ++* ( 1 ) ++* ( -s(k) c(k) ) ++* ++* where R(k) appears in rows and columns k and z. The rotations are ++* performed without ever forming P(k) explicitly. + * + * Arguments + * ========= +@@ -61,13 +96,13 @@ + * Specifies whether the plane rotation matrix P is applied to + * A on the left or the right. + * = 'L': Left, compute A := P*A +-* = 'R': Right, compute A:= A*P' ++* = 'R': Right, compute A:= A*P**T + * + * DIRECT (input) CHARACTER*1 + * Specifies whether P is a forward or backward sequence of + * plane rotations. +-* = 'F': Forward, P = P( z - 1 )*...*P( 2 )*P( 1 ) +-* = 'B': Backward, P = P( 1 )*P( 2 )*...*P( z - 1 ) ++* = 'F': Forward, P = P(z-1)*...*P(2)*P(1) ++* = 'B': Backward, P = P(1)*P(2)*...*P(z-1) + * + * PIVOT (input) CHARACTER*1 + * Specifies the plane for which P(k) is a plane rotation +@@ -84,18 +119,22 @@ + * The number of columns of the matrix A. If n <= 1, an + * immediate return is effected. + * +-* C, S (input) DOUBLE PRECISION arrays, dimension ++* C (input) DOUBLE PRECISION array, dimension ++* (M-1) if SIDE = 'L' ++* (N-1) if SIDE = 'R' ++* The cosines c(k) of the plane rotations. ++* ++* S (input) DOUBLE PRECISION array, dimension + * (M-1) if SIDE = 'L' + * (N-1) if SIDE = 'R' +-* c(k) and s(k) contain the cosine and sine that define the +-* matrix P(k). 
The two by two plane rotation part of the +-* matrix P(k), R(k), is assumed to be of the form +-* R( k ) = ( c( k ) s( k ) ). +-* ( -s( k ) c( k ) ) ++* The sines s(k) of the plane rotations. The 2-by-2 plane ++* rotation part of the matrix P(k), R(k), has the form ++* R(k) = ( c(k) s(k) ) ++* ( -s(k) c(k) ). + * + * A (input/output) COMPLEX*16 array, dimension (LDA,N) +-* The m by n matrix A. On exit, A is overwritten by P*A if +-* SIDE = 'R' or by A*P' if SIDE = 'L'. ++* The M-by-N matrix A. On exit, A is overwritten by P*A if ++* SIDE = 'R' or by A*P**T if SIDE = 'L'. + * + * LDA (input) INTEGER + * The leading dimension of the array A. LDA >= max(1,M). +diff -uNr LAPACK.orig/SRC/ztgevc.f LAPACK/SRC/ztgevc.f +--- LAPACK.orig/SRC/ztgevc.f Thu Nov 4 14:26:09 1999 ++++ LAPACK/SRC/ztgevc.f Fri May 25 16:13:41 2001 +@@ -1,19 +1,19 @@ +- SUBROUTINE ZTGEVC( SIDE, HOWMNY, SELECT, N, A, LDA, B, LDB, VL, ++ SUBROUTINE ZTGEVC( SIDE, HOWMNY, SELECT, N, S, LDS, P, LDP, VL, + $ LDVL, VR, LDVR, MM, M, WORK, RWORK, INFO ) + * + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 4, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +- INTEGER INFO, LDA, LDB, LDVL, LDVR, M, MM, N ++ INTEGER INFO, LDP, LDS, LDVL, LDVR, M, MM, N + * .. + * .. Array Arguments .. + LOGICAL SELECT( * ) + DOUBLE PRECISION RWORK( * ) +- COMPLEX*16 A( LDA, * ), B( LDB, * ), VL( LDVL, * ), ++ COMPLEX*16 P( LDP, * ), S( LDS, * ), VL( LDVL, * ), + $ VR( LDVR, * ), WORK( * ) + * .. + * +@@ -21,28 +21,30 @@ + * Purpose + * ======= + * +-* ZTGEVC computes some or all of the right and/or left generalized +-* eigenvectors of a pair of complex upper triangular matrices (A,B). 
+-* +-* The right generalized eigenvector x and the left generalized +-* eigenvector y of (A,B) corresponding to a generalized eigenvalue +-* w are defined by: +-* +-* (A - wB) * x = 0 and y**H * (A - wB) = 0 +-* ++* ZTGEVC computes some or all of the right and/or left eigenvectors of ++* a pair of complex matrices (S,P), where S and P are upper triangular. ++* Matrix pairs of this type are produced by the generalized Schur ++* factorization of a complex matrix pair (A,B): ++* ++* A = Q*S*Z**H, B = Q*P*Z**H ++* ++* as computed by ZGGHRD + ZHGEQZ. ++* ++* The right eigenvector x and the left eigenvector y of (S,P) ++* corresponding to an eigenvalue w are defined by: ++* ++* S*x = w*P*x, (y**H)*S = w*(y**H)*P, ++* + * where y**H denotes the conjugate tranpose of y. +-* +-* If an eigenvalue w is determined by zero diagonal elements of both A +-* and B, a unit vector is returned as the corresponding eigenvector. +-* +-* If all eigenvectors are requested, the routine may either return +-* the matrices X and/or Y of right or left eigenvectors of (A,B), or +-* the products Z*X and/or Q*Y, where Z and Q are input unitary +-* matrices. If (A,B) was obtained from the generalized Schur +-* factorization of an original pair of matrices +-* (A0,B0) = (Q*A*Z**H,Q*B*Z**H), +-* then Z*X and Q*Y are the matrices of right or left eigenvectors of +-* A. ++* The eigenvalues are not input to this routine, but are computed ++* directly from the diagonal elements of S and P. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of (S,P), or the products Z*X and/or Q*Y, ++* where Z and Q are input matrices. ++* If Q and Z are the unitary factors from the generalized Schur ++* factorization of a matrix pair (A,B), then Z*X and Q*Y ++* are the matrices of right and left eigenvectors of (A,B). 
+ * + * Arguments + * ========= +@@ -54,66 +56,66 @@ + * + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; +-* = 'B': compute all right and/or left eigenvectors, and +-* backtransform them using the input matrices supplied +-* in VR and/or VL; ++* = 'B': compute all right and/or left eigenvectors, ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, + * specified by the logical array SELECT. + * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY='S', SELECT specifies the eigenvectors to be +-* computed. +-* If HOWMNY='A' or 'B', SELECT is not referenced. +-* To select the eigenvector corresponding to the j-th +-* eigenvalue, SELECT(j) must be set to .TRUE.. ++* computed. The eigenvector corresponding to the j-th ++* eigenvalue is computed if SELECT(j) = .TRUE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER +-* The order of the matrices A and B. N >= 0. +-* +-* A (input) COMPLEX*16 array, dimension (LDA,N) +-* The upper triangular matrix A. +-* +-* LDA (input) INTEGER +-* The leading dimension of array A. LDA >= max(1,N). ++* The order of the matrices S and P. N >= 0. + * +-* B (input) COMPLEX*16 array, dimension (LDB,N) +-* The upper triangular matrix B. B must have real diagonal +-* elements. ++* S (input) COMPLEX*16 array, dimension (LDS,N) ++* The upper triangular matrix S from a generalized Schur ++* factorization, as computed by ZHGEQZ. ++* ++* LDS (input) INTEGER ++* The leading dimension of array S. LDS >= max(1,N). ++* ++* P (input) COMPLEX*16 array, dimension (LDP,N) ++* The upper triangular matrix P from a generalized Schur ++* factorization, as computed by ZHGEQZ. P must have real ++* diagonal elements. + * +-* LDB (input) INTEGER +-* The leading dimension of array B. LDB >= max(1,N). ++* LDP (input) INTEGER ++* The leading dimension of array P. LDP >= max(1,N). 
+ * + * VL (input/output) COMPLEX*16 array, dimension (LDVL,MM) + * On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must + * contain an N-by-N matrix Q (usually the unitary matrix Q + * of left Schur vectors returned by ZHGEQZ). + * On exit, if SIDE = 'L' or 'B', VL contains: +-* if HOWMNY = 'A', the matrix Y of left eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Q*Y; +-* if HOWMNY = 'S', the left eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the left eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VL, in the same order as their eigenvalues. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of array VL. +-* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'l' or 'B' or 'b', LDVL >= N. + * + * VR (input/output) COMPLEX*16 array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must + * contain an N-by-N matrix Q (usually the unitary matrix Z + * of right Schur vectors returned by ZHGEQZ). + * On exit, if SIDE = 'R' or 'B', VR contains: +-* if HOWMNY = 'A', the matrix X of right eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Z*X; +-* if HOWMNY = 'S', the right eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the right eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VR, in the same order as their eigenvalues. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. + * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. +-* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. 
+ * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +@@ -180,7 +182,7 @@ + IHWMNY = 2 + ILALL = .FALSE. + ILBACK = .FALSE. +- ELSE IF( LSAME( HOWMNY, 'B' ) .OR. LSAME( HOWMNY, 'T' ) ) THEN ++ ELSE IF( LSAME( HOWMNY, 'B' ) ) THEN + IHWMNY = 3 + ILALL = .TRUE. + ILBACK = .TRUE. +@@ -211,9 +213,9 @@ + INFO = -2 + ELSE IF( N.LT.0 ) THEN + INFO = -4 +- ELSE IF( LDA.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDS.LT.MAX( 1, N ) ) THEN + INFO = -6 +- ELSE IF( LDB.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDP.LT.MAX( 1, N ) ) THEN + INFO = -8 + END IF + IF( INFO.NE.0 ) THEN +@@ -237,7 +239,7 @@ + * + ILBBAD = .FALSE. + DO 20 J = 1, N +- IF( DIMAG( B( J, J ) ).NE.ZERO ) ++ IF( DIMAG( P( J, J ) ).NE.ZERO ) + $ ILBBAD = .TRUE. + 20 CONTINUE + * +@@ -275,19 +277,19 @@ + * part of A and B to check for possible overflow in the triangular + * solver. + * +- ANORM = ABS1( A( 1, 1 ) ) +- BNORM = ABS1( B( 1, 1 ) ) ++ ANORM = ABS1( S( 1, 1 ) ) ++ BNORM = ABS1( P( 1, 1 ) ) + RWORK( 1 ) = ZERO + RWORK( N+1 ) = ZERO + DO 40 J = 2, N + RWORK( J ) = ZERO + RWORK( N+J ) = ZERO + DO 30 I = 1, J - 1 +- RWORK( J ) = RWORK( J ) + ABS1( A( I, J ) ) +- RWORK( N+J ) = RWORK( N+J ) + ABS1( B( I, J ) ) ++ RWORK( J ) = RWORK( J ) + ABS1( S( I, J ) ) ++ RWORK( N+J ) = RWORK( N+J ) + ABS1( P( I, J ) ) + 30 CONTINUE +- ANORM = MAX( ANORM, RWORK( J )+ABS1( A( J, J ) ) ) +- BNORM = MAX( BNORM, RWORK( N+J )+ABS1( B( J, J ) ) ) ++ ANORM = MAX( ANORM, RWORK( J )+ABS1( S( J, J ) ) ) ++ BNORM = MAX( BNORM, RWORK( N+J )+ABS1( P( J, J ) ) ) + 40 CONTINUE + * + ASCALE = ONE / MAX( ANORM, SAFMIN ) +@@ -309,8 +311,8 @@ + IF( ILCOMP ) THEN + IEIG = IEIG + 1 + * +- IF( ABS1( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( DBLE( B( JE, JE ) ) ).LE.SAFMIN ) THEN ++ IF( ABS1( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( DBLE( P( JE, JE ) ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- return unit eigenvector + * +@@ -326,10 +328,10 @@ + * H + * y ( a A - b B ) = 0 + * +- TEMP = ONE / MAX( ABS1( A( JE, JE ) )*ASCALE, +- $ ABS( DBLE( B( JE, JE ) ) )*BSCALE, SAFMIN ) +- SALPHA = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*DBLE( B( JE, JE ) ) )*BSCALE ++ TEMP = ONE / MAX( ABS1( S( JE, JE ) )*ASCALE, ++ $ ABS( DBLE( P( JE, JE ) ) )*BSCALE, SAFMIN ) ++ SALPHA = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*DBLE( P( JE, JE ) ) )*BSCALE + ACOEFF = SBETA*ASCALE + BCOEFF = SALPHA*BSCALE + * +@@ -380,7 +382,7 @@ + * + * Compute + * j-1 +-* SUM = sum conjg( a*A(k,j) - b*B(k,j) )*x(k) ++* SUM = sum conjg( a*S(k,j) - b*P(k,j) )*x(k) + * k=je + * (Scale if necessary) + * +@@ -396,16 +398,16 @@ + SUMB = CZERO + * + DO 80 JR = JE, J - 1 +- SUMA = SUMA + DCONJG( A( JR, J ) )*WORK( JR ) +- SUMB = SUMB + DCONJG( B( JR, J ) )*WORK( JR ) ++ SUMA = SUMA + DCONJG( S( JR, J ) )*WORK( JR ) ++ SUMB = SUMB + DCONJG( P( JR, J ) )*WORK( JR ) + 80 CONTINUE + SUM = ACOEFF*SUMA - DCONJG( BCOEFF )*SUMB + * +-* Form x(j) = - SUM / conjg( a*A(j,j) - b*B(j,j) ) ++* Form x(j) = - SUM / conjg( a*S(j,j) - b*P(j,j) ) + * + * with scaling and perturbation of the denominator + * +- D = DCONJG( ACOEFF*A( J, J )-BCOEFF*B( J, J ) ) ++ D = DCONJG( ACOEFF*S( J, J )-BCOEFF*P( J, J ) ) + IF( ABS1( D ).LE.DMIN ) + $ D = DCMPLX( DMIN ) + * +@@ -475,8 +477,8 @@ + IF( ILCOMP ) THEN + IEIG = IEIG - 1 + * +- IF( ABS1( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( DBLE( B( JE, JE ) ) ).LE.SAFMIN ) THEN ++ IF( ABS1( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( DBLE( P( JE, JE ) ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- return unit eigenvector + * +@@ -492,10 +494,10 @@ + * + * ( a A - b B ) x = 0 + * +- TEMP = ONE / MAX( ABS1( A( JE, JE ) )*ASCALE, +- $ ABS( DBLE( B( JE, JE ) ) )*BSCALE, SAFMIN ) +- SALPHA = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*DBLE( B( JE, JE ) ) )*BSCALE ++ TEMP = ONE / MAX( ABS1( S( JE, JE ) )*ASCALE, ++ $ ABS( DBLE( P( JE, JE ) ) )*BSCALE, SAFMIN ) ++ SALPHA = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*DBLE( P( JE, JE ) ) )*BSCALE + ACOEFF = SBETA*ASCALE + BCOEFF = SALPHA*BSCALE + * +@@ -542,7 +544,7 @@ + * WORK(j+1:JE) contains x + * + DO 170 JR = 1, JE - 1 +- WORK( JR ) = ACOEFF*A( JR, JE ) - BCOEFF*B( JR, JE ) ++ WORK( JR ) = ACOEFF*S( JR, JE ) - BCOEFF*P( JR, JE ) + 170 CONTINUE + WORK( JE ) = CONE + * +@@ -551,7 +553,7 @@ + * Form x(j) := - w(j) / d + * with scaling and perturbation of the denominator + * +- D = ACOEFF*A( J, J ) - BCOEFF*B( J, J ) ++ D = ACOEFF*S( J, J ) - BCOEFF*P( J, J ) + IF( ABS1( D ).LE.DMIN ) + $ D = DCMPLX( DMIN ) + * +@@ -568,7 +570,7 @@ + * + IF( J.GT.1 ) THEN + * +-* w = w + x(j)*(a A(*,j) - b B(*,j) ) with scaling ++* w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling + * + IF( ABS1( WORK( J ) ).GT.ONE ) THEN + TEMP = ONE / ABS1( WORK( J ) ) +@@ -583,8 +585,8 @@ + CA = ACOEFF*WORK( J ) + CB = BCOEFF*WORK( J ) + DO 200 JR = 1, J - 1 +- WORK( JR ) = WORK( JR ) + CA*A( JR, J ) - +- $ CB*B( JR, J ) ++ WORK( JR ) = WORK( JR ) + CA*S( JR, J ) - ++ $ CB*P( JR, J ) + 200 CONTINUE + END IF + 210 CONTINUE +diff -uNr LAPACK.orig/SRC/ztrevc.f LAPACK/SRC/ztrevc.f +--- LAPACK.orig/SRC/ztrevc.f Thu Nov 4 14:25:39 1999 ++++ LAPACK/SRC/ztrevc.f Fri May 25 16:14:01 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 7, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER HOWMNY, SIDE +@@ -22,20 +22,23 @@ + * + * ZTREVC computes some or all of the right and/or left eigenvectors of + * a complex upper triangular matrix T. +-* ++* Matrices of this type are produced by the Schur factorization of ++* a complex general matrix: A = Q*T*Q**H, as computed by ZHSEQR. ++* + * The right eigenvector x and the left eigenvector y of T corresponding + * to an eigenvalue w are defined by: +-* +-* T*x = w*x, y'*T = w*y' +-* +-* where y' denotes the conjugate transpose of the vector y. +-* +-* If all eigenvectors are requested, the routine may either return the +-* matrices X and/or Y of right or left eigenvectors of T, or the +-* products Q*X and/or Q*Y, where Q is an input unitary +-* matrix. If T was obtained from the Schur factorization of an +-* original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of +-* right or left eigenvectors of A. ++* ++* T*x = w*x, (y**H)*T = w*(y**H) ++* ++* where y**H denotes the conjugate transpose of the vector y. ++* The eigenvalues are not input to this routine, but are read directly ++* from the diagonal of T. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an ++* input matrix. If Q is the unitary factor that reduces a matrix A to ++* Schur form T, then Q*X and Q*Y are the matrices of right and left ++* eigenvectors of A. + * + * Arguments + * ========= +@@ -48,17 +51,17 @@ + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; + * = 'B': compute all right and/or left eigenvectors, +-* and backtransform them using the input matrices +-* supplied in VR and/or VL; ++* backtransformed using the matrices supplied in ++* VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, +-* specified by the logical array SELECT. ++* as indicated by the logical array SELECT. 
+ * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY = 'S', SELECT specifies the eigenvectors to be + * computed. +-* If HOWMNY = 'A' or 'B', SELECT is not referenced. +-* To select the eigenvector corresponding to the j-th +-* eigenvalue, SELECT(j) must be set to .TRUE.. ++* The eigenvector corresponding to the j-th eigenvalue is ++* computed if SELECT(j) = .TRUE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER + * The order of the matrix T. N >= 0. +@@ -76,19 +79,16 @@ + * Schur vectors returned by ZHSEQR). + * On exit, if SIDE = 'L' or 'B', VL contains: + * if HOWMNY = 'A', the matrix Y of left eigenvectors of T; +-* VL is lower triangular. The i-th column +-* VL(i) of VL is the eigenvector corresponding +-* to T(i,i). + * if HOWMNY = 'B', the matrix Q*Y; + * if HOWMNY = 'S', the left eigenvectors of T specified by + * SELECT, stored consecutively in the columns + * of VL, in the same order as their + * eigenvalues. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of the array VL. LDVL >= max(1,N) if +-* SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of the array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. + * + * VR (input/output) COMPLEX*16 array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +@@ -96,19 +96,16 @@ + * Schur vectors returned by ZHSEQR). + * On exit, if SIDE = 'R' or 'B', VR contains: + * if HOWMNY = 'A', the matrix X of right eigenvectors of T; +-* VR is upper triangular. The i-th column +-* VR(i) of VR is the eigenvector corresponding +-* to T(i,i). + * if HOWMNY = 'B', the matrix Q*X; + * if HOWMNY = 'S', the right eigenvectors of T specified by + * SELECT, stored consecutively in the columns + * of VR, in the same order as their + * eigenvalues. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. 
+ * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. LDVR >= max(1,N) if +-* SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B'; LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +diff -uNr LAPACK.orig/SRC/ztrsen.f LAPACK/SRC/ztrsen.f +--- LAPACK.orig/SRC/ztrsen.f Thu Nov 4 14:25:39 1999 ++++ LAPACK/SRC/ztrsen.f Fri May 25 16:14:20 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* January 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, JOB +@@ -93,14 +93,13 @@ + * If JOB = 'N' or 'E', SEP is not referenced. + * + * WORK (workspace/output) COMPLEX*16 array, dimension (LWORK) +-* If JOB = 'N', WORK is not referenced. Otherwise, +-* on exit, if INFO = 0, WORK(1) returns the optimal LWORK. ++* On exit, if INFO = 0, WORK(1) returns the optimal LWORK. + * + * LWORK (input) INTEGER + * The dimension of the array WORK. + * If JOB = 'N', LWORK >= 1; +-* if JOB = 'E', LWORK = M*(N-M); +-* if JOB = 'V' or 'B', LWORK >= 2*M*(N-M). ++* if JOB = 'E', LWORK = max(1,M*(N-M)); ++* if JOB = 'V' or 'B', LWORK >= max(1,2*M*(N-M)). + * + * If LWORK = -1, then a workspace query is assumed; the routine + * only calculates the optimal size of the WORK array, returns +diff -uNr LAPACK.orig/SRC/ztrsyl.f LAPACK/SRC/ztrsyl.f +--- LAPACK.orig/SRC/ztrsyl.f Thu Nov 4 14:25:39 1999 ++++ LAPACK/SRC/ztrsyl.f Fri May 25 16:14:31 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* January 9, 2001 + * + * .. Scalar Arguments .. 
+ CHARACTER TRANA, TRANB +@@ -119,11 +119,9 @@ + NOTRNB = LSAME( TRANB, 'N' ) + * + INFO = 0 +- IF( .NOT.NOTRNA .AND. .NOT.LSAME( TRANA, 'T' ) .AND. .NOT. +- $ LSAME( TRANA, 'C' ) ) THEN ++ IF( .NOT.NOTRNA .AND. .NOT.LSAME( TRANA, 'C' ) ) THEN + INFO = -1 +- ELSE IF( .NOT.NOTRNB .AND. .NOT.LSAME( TRANB, 'T' ) .AND. .NOT. +- $ LSAME( TRANB, 'C' ) ) THEN ++ ELSE IF( .NOT.NOTRNB .AND. .NOT.LSAME( TRANB, 'C' ) ) THEN + INFO = -2 + ELSE IF( ISGN.NE.1 .AND. ISGN.NE.-1 ) THEN + INFO = -3 +diff -uNr LAPACK.orig/TESTING/EIG/cerrgg.f LAPACK/TESTING/EIG/cerrgg.f +--- LAPACK.orig/TESTING/EIG/cerrgg.f Thu Nov 4 14:27:30 1999 ++++ LAPACK/TESTING/EIG/cerrgg.f Fri May 25 16:17:13 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK test routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* October 9, 2000 + * + * .. Scalar Arguments .. + CHARACTER*3 PATH +@@ -245,24 +245,24 @@ + $ 1, R1, R2, U, 1, V, 1, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'CGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 10 +- CALL CGGSVD( 'N', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 0, B, ++ CALL CGGSVD( 'N', 'N', 'N', 2, 1, 1, DUMMYK, DUMMYL, A, 1, B, + $ 1, R1, R2, U, 1, V, 1, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'CGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 12 +- CALL CGGSVD( 'N', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 0, R1, R2, U, 1, V, 1, Q, 1, W, RW, IW, INFO ) ++ CALL CGGSVD( 'N', 'N', 'N', 1, 1, 2, DUMMYK, DUMMYL, A, 1, B, ++ $ 1, R1, R2, U, 1, V, 1, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'CGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 16 +- CALL CGGSVD( 'U', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, R1, R2, U, 0, V, 1, Q, 1, W, RW, IW, INFO ) ++ CALL CGGSVD( 'U', 'N', 'N', 2, 2, 2, DUMMYK, DUMMYL, A, 2, B, ++ $ 2, R1, R2, U, 1, V, 1, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'CGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 18 +- CALL CGGSVD( 'N', 'V', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, 
R1, R2, U, 1, V, 0, Q, 1, W, RW, IW, INFO ) ++ CALL CGGSVD( 'N', 'V', 'N', 2, 2, 2, DUMMYK, DUMMYL, A, 2, B, ++ $ 2, R1, R2, U, 2, V, 1, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'CGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 20 +- CALL CGGSVD( 'N', 'N', 'Q', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, R1, R2, U, 1, V, 1, Q, 0, W, RW, IW, INFO ) ++ CALL CGGSVD( 'N', 'N', 'Q', 2, 2, 2, DUMMYK, DUMMYL, A, 2, B, ++ $ 2, R1, R2, U, 2, V, 2, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'CGGSVD', INFOT, NOUT, LERR, OK ) + NT = NT + 11 + * +@@ -300,28 +300,28 @@ + $ INFO ) + CALL CHKXER( 'CGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 8 +- CALL CGGSVP( 'N', 'N', 'N', 0, 0, 0, A, 0, B, 1, TOLA, TOLB, ++ CALL CGGSVP( 'N', 'N', 'N', 2, 1, 1, A, 1, B, 1, TOLA, TOLB, + $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, RW, TAU, W, + $ INFO ) + CALL CHKXER( 'CGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 10 +- CALL CGGSVP( 'N', 'N', 'N', 0, 0, 0, A, 1, B, 0, TOLA, TOLB, ++ CALL CGGSVP( 'N', 'N', 'N', 1, 2, 1, A, 1, B, 1, TOLA, TOLB, + $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, RW, TAU, W, + $ INFO ) + CALL CHKXER( 'CGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 16 +- CALL CGGSVP( 'U', 'N', 'N', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 0, V, 1, Q, 1, IW, RW, TAU, W, ++ CALL CGGSVP( 'U', 'N', 'N', 2, 2, 2, A, 2, B, 2, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, RW, TAU, W, + $ INFO ) + CALL CHKXER( 'CGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 18 +- CALL CGGSVP( 'N', 'V', 'N', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 1, V, 0, Q, 1, IW, RW, TAU, W, ++ CALL CGGSVP( 'N', 'V', 'N', 2, 2, 2, A, 2, B, 2, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 2, V, 1, Q, 1, IW, RW, TAU, W, + $ INFO ) + CALL CHKXER( 'CGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 20 +- CALL CGGSVP( 'N', 'N', 'Q', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 0, IW, RW, TAU, W, ++ CALL CGGSVP( 'N', 'N', 'Q', 2, 2, 2, A, 2, B, 2, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 2, V, 2, Q, 1, IW, RW, TAU, W, + $ INFO 
) + CALL CHKXER( 'CGGSVP', INFOT, NOUT, LERR, OK ) + NT = NT + 11 +diff -uNr LAPACK.orig/TESTING/EIG/derrgg.f LAPACK/TESTING/EIG/derrgg.f +--- LAPACK.orig/TESTING/EIG/derrgg.f Thu Nov 4 14:27:53 1999 ++++ LAPACK/TESTING/EIG/derrgg.f Fri May 25 16:17:09 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK test routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* October 9, 2000 + * + * .. Scalar Arguments .. + CHARACTER*3 PATH +@@ -244,24 +244,24 @@ + $ 1, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + CALL CHKXER( 'DGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 10 +- CALL DGGSVD( 'N', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 0, B, ++ CALL DGGSVD( 'N', 'N', 'N', 2, 1, 1, DUMMYK, DUMMYL, A, 1, B, + $ 1, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + CALL CHKXER( 'DGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 12 +- CALL DGGSVD( 'N', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 0, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) ++ CALL DGGSVD( 'N', 'N', 'N', 1, 1, 2, DUMMYK, DUMMYL, A, 1, B, ++ $ 1, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + CALL CHKXER( 'DGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 16 +- CALL DGGSVD( 'U', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, R1, R2, U, 0, V, 1, Q, 1, W, IW, INFO ) ++ CALL DGGSVD( 'U', 'N', 'N', 2, 2, 2, DUMMYK, DUMMYL, A, 2, B, ++ $ 2, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + CALL CHKXER( 'DGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 18 +- CALL DGGSVD( 'N', 'V', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, R1, R2, U, 1, V, 0, Q, 1, W, IW, INFO ) ++ CALL DGGSVD( 'N', 'V', 'N', 1, 1, 2, DUMMYK, DUMMYL, A, 1, B, ++ $ 2, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + CALL CHKXER( 'DGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 20 +- CALL DGGSVD( 'N', 'N', 'Q', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, R1, R2, U, 1, V, 1, Q, 0, W, IW, INFO ) ++ CALL DGGSVD( 'N', 'N', 'Q', 1, 2, 1, DUMMYK, DUMMYL, A, 1, B, ++ $ 1, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + 
CALL CHKXER( 'DGGSVD', INFOT, NOUT, LERR, OK ) + NT = NT + 11 + * +@@ -299,28 +299,28 @@ + $ INFO ) + CALL CHKXER( 'DGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 8 +- CALL DGGSVP( 'N', 'N', 'N', 0, 0, 0, A, 0, B, 1, TOLA, TOLB, ++ CALL DGGSVP( 'N', 'N', 'N', 2, 1, 1, A, 1, B, 1, TOLA, TOLB, + $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, TAU, W, + $ INFO ) + CALL CHKXER( 'DGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 10 +- CALL DGGSVP( 'N', 'N', 'N', 0, 0, 0, A, 1, B, 0, TOLA, TOLB, ++ CALL DGGSVP( 'N', 'N', 'N', 1, 2, 1, A, 1, B, 1, TOLA, TOLB, + $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, TAU, W, + $ INFO ) + CALL CHKXER( 'DGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 16 +- CALL DGGSVP( 'U', 'N', 'N', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 0, V, 1, Q, 1, IW, TAU, W, ++ CALL DGGSVP( 'U', 'N', 'N', 2, 2, 2, A, 2, B, 2, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, TAU, W, + $ INFO ) + CALL CHKXER( 'DGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 18 +- CALL DGGSVP( 'N', 'V', 'N', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 1, V, 0, Q, 1, IW, TAU, W, ++ CALL DGGSVP( 'N', 'V', 'N', 1, 2, 1, A, 1, B, 2, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, TAU, W, + $ INFO ) + CALL CHKXER( 'DGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 20 +- CALL DGGSVP( 'N', 'N', 'Q', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 0, IW, TAU, W, ++ CALL DGGSVP( 'N', 'N', 'Q', 1, 1, 2, A, 1, B, 1, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, TAU, W, + $ INFO ) + CALL CHKXER( 'DGGSVP', INFOT, NOUT, LERR, OK ) + NT = NT + 11 +@@ -501,11 +501,11 @@ + CALL CHKXER( 'DGGRQF', INFOT, NOUT, LERR, OK ) + NT = NT + 6 + * +-* Test error exits for the DGS, DGV, DGX, and DXV paths. ++* Test error exits for the SGS, SGV, SGX, and SXV paths. + * +- ELSE IF( LSAMEN( 3, PATH, 'DGS' ) .OR. +- $ LSAMEN( 3, PATH, 'DGV' ) .OR. +- $ LSAMEN( 3, PATH, 'DGX' ) .OR. LSAMEN( 3, PATH, 'DXV' ) ) ++ ELSE IF( LSAMEN( 3, PATH, 'SGS' ) .OR. 
++ $ LSAMEN( 3, PATH, 'SGV' ) .OR. ++ $ LSAMEN( 3, PATH, 'SGX' ) .OR. LSAMEN( 3, PATH, 'SXV' ) ) + $ THEN + * + * DGGES +diff -uNr LAPACK.orig/TESTING/EIG/serrgg.f LAPACK/TESTING/EIG/serrgg.f +--- LAPACK.orig/TESTING/EIG/serrgg.f Thu Nov 4 14:27:25 1999 ++++ LAPACK/TESTING/EIG/serrgg.f Fri May 25 16:17:05 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK test routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* October 9, 2000 + * + * .. Scalar Arguments .. + CHARACTER*3 PATH +@@ -244,24 +244,24 @@ + $ 1, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + CALL CHKXER( 'SGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 10 +- CALL SGGSVD( 'N', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 0, B, ++ CALL SGGSVD( 'N', 'N', 'N', 2, 1, 1, DUMMYK, DUMMYL, A, 1, B, + $ 1, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + CALL CHKXER( 'SGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 12 +- CALL SGGSVD( 'N', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 0, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) ++ CALL SGGSVD( 'N', 'N', 'N', 1, 1, 2, DUMMYK, DUMMYL, A, 1, B, ++ $ 1, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + CALL CHKXER( 'SGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 16 +- CALL SGGSVD( 'U', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, R1, R2, U, 0, V, 1, Q, 1, W, IW, INFO ) ++ CALL SGGSVD( 'U', 'N', 'N', 2, 2, 2, DUMMYK, DUMMYL, A, 2, B, ++ $ 2, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + CALL CHKXER( 'SGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 18 +- CALL SGGSVD( 'N', 'V', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, R1, R2, U, 1, V, 0, Q, 1, W, IW, INFO ) ++ CALL SGGSVD( 'N', 'V', 'N', 1, 1, 2, DUMMYK, DUMMYL, A, 1, B, ++ $ 2, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + CALL CHKXER( 'SGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 20 +- CALL SGGSVD( 'N', 'N', 'Q', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, R1, R2, U, 1, V, 1, Q, 0, W, IW, INFO ) ++ CALL SGGSVD( 'N', 'N', 'Q', 1, 2, 1, DUMMYK, DUMMYL, A, 1, 
B, ++ $ 1, R1, R2, U, 1, V, 1, Q, 1, W, IW, INFO ) + CALL CHKXER( 'SGGSVD', INFOT, NOUT, LERR, OK ) + NT = NT + 11 + * +@@ -299,28 +299,28 @@ + $ INFO ) + CALL CHKXER( 'SGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 8 +- CALL SGGSVP( 'N', 'N', 'N', 0, 0, 0, A, 0, B, 1, TOLA, TOLB, ++ CALL SGGSVP( 'N', 'N', 'N', 2, 1, 1, A, 1, B, 1, TOLA, TOLB, + $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, TAU, W, + $ INFO ) + CALL CHKXER( 'SGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 10 +- CALL SGGSVP( 'N', 'N', 'N', 0, 0, 0, A, 1, B, 0, TOLA, TOLB, ++ CALL SGGSVP( 'N', 'N', 'N', 1, 2, 1, A, 1, B, 1, TOLA, TOLB, + $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, TAU, W, + $ INFO ) + CALL CHKXER( 'SGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 16 +- CALL SGGSVP( 'U', 'N', 'N', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 0, V, 1, Q, 1, IW, TAU, W, ++ CALL SGGSVP( 'U', 'N', 'N', 2, 2, 2, A, 2, B, 2, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, TAU, W, + $ INFO ) + CALL CHKXER( 'SGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 18 +- CALL SGGSVP( 'N', 'V', 'N', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 1, V, 0, Q, 1, IW, TAU, W, ++ CALL SGGSVP( 'N', 'V', 'N', 1, 2, 1, A, 1, B, 2, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, TAU, W, + $ INFO ) + CALL CHKXER( 'SGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 20 +- CALL SGGSVP( 'N', 'N', 'Q', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 0, IW, TAU, W, ++ CALL SGGSVP( 'N', 'N', 'Q', 1, 1, 2, A, 1, B, 1, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, TAU, W, + $ INFO ) + CALL CHKXER( 'SGGSVP', INFOT, NOUT, LERR, OK ) + NT = NT + 11 +diff -uNr LAPACK.orig/TESTING/EIG/zerrgg.f LAPACK/TESTING/EIG/zerrgg.f +--- LAPACK.orig/TESTING/EIG/zerrgg.f Thu Nov 4 14:27:40 1999 ++++ LAPACK/TESTING/EIG/zerrgg.f Fri May 25 16:17:20 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK test routine (version 3.0) -- + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* October 9, 2000 + * + * .. Scalar Arguments .. + CHARACTER*3 PATH +@@ -245,24 +245,24 @@ + $ 1, R1, R2, U, 1, V, 1, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'ZGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 10 +- CALL ZGGSVD( 'N', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 0, B, ++ CALL ZGGSVD( 'N', 'N', 'N', 2, 1, 1, DUMMYK, DUMMYL, A, 1, B, + $ 1, R1, R2, U, 1, V, 1, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'ZGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 12 +- CALL ZGGSVD( 'N', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 0, R1, R2, U, 1, V, 1, Q, 1, W, RW, IW, INFO ) ++ CALL ZGGSVD( 'N', 'N', 'N', 1, 1, 2, DUMMYK, DUMMYL, A, 1, B, ++ $ 1, R1, R2, U, 1, V, 1, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'ZGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 16 +- CALL ZGGSVD( 'U', 'N', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, R1, R2, U, 0, V, 1, Q, 1, W, RW, IW, INFO ) ++ CALL ZGGSVD( 'U', 'N', 'N', 2, 2, 2, DUMMYK, DUMMYL, A, 2, B, ++ $ 2, R1, R2, U, 1, V, 1, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'ZGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 18 +- CALL ZGGSVD( 'N', 'V', 'N', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, R1, R2, U, 1, V, 0, Q, 1, W, RW, IW, INFO ) ++ CALL ZGGSVD( 'N', 'V', 'N', 2, 2, 2, DUMMYK, DUMMYL, A, 2, B, ++ $ 2, R1, R2, U, 2, V, 1, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'ZGGSVD', INFOT, NOUT, LERR, OK ) + INFOT = 20 +- CALL ZGGSVD( 'N', 'N', 'Q', 0, 0, 0, DUMMYK, DUMMYL, A, 1, B, +- $ 1, R1, R2, U, 1, V, 1, Q, 0, W, RW, IW, INFO ) ++ CALL ZGGSVD( 'N', 'N', 'Q', 2, 2, 2, DUMMYK, DUMMYL, A, 2, B, ++ $ 2, R1, R2, U, 2, V, 2, Q, 1, W, RW, IW, INFO ) + CALL CHKXER( 'ZGGSVD', INFOT, NOUT, LERR, OK ) + NT = NT + 11 + * +@@ -300,28 +300,28 @@ + $ INFO ) + CALL CHKXER( 'ZGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 8 +- CALL ZGGSVP( 'N', 'N', 'N', 0, 0, 0, A, 0, B, 1, TOLA, TOLB, ++ CALL ZGGSVP( 'N', 'N', 'N', 2, 1, 1, A, 1, B, 1, TOLA, TOLB, + $ DUMMYK, DUMMYL, 
U, 1, V, 1, Q, 1, IW, RW, TAU, W, + $ INFO ) + CALL CHKXER( 'ZGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 10 +- CALL ZGGSVP( 'N', 'N', 'N', 0, 0, 0, A, 1, B, 0, TOLA, TOLB, ++ CALL ZGGSVP( 'N', 'N', 'N', 1, 2, 1, A, 1, B, 1, TOLA, TOLB, + $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, RW, TAU, W, + $ INFO ) + CALL CHKXER( 'ZGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 16 +- CALL ZGGSVP( 'U', 'N', 'N', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 0, V, 1, Q, 1, IW, RW, TAU, W, ++ CALL ZGGSVP( 'U', 'N', 'N', 2, 2, 2, A, 2, B, 2, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 1, IW, RW, TAU, W, + $ INFO ) + CALL CHKXER( 'ZGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 18 +- CALL ZGGSVP( 'N', 'V', 'N', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 1, V, 0, Q, 1, IW, RW, TAU, W, ++ CALL ZGGSVP( 'N', 'V', 'N', 2, 2, 2, A, 2, B, 2, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 2, V, 1, Q, 1, IW, RW, TAU, W, + $ INFO ) + CALL CHKXER( 'ZGGSVP', INFOT, NOUT, LERR, OK ) + INFOT = 20 +- CALL ZGGSVP( 'N', 'N', 'Q', 0, 0, 0, A, 1, B, 1, TOLA, TOLB, +- $ DUMMYK, DUMMYL, U, 1, V, 1, Q, 0, IW, RW, TAU, W, ++ CALL ZGGSVP( 'N', 'N', 'Q', 2, 2, 2, A, 2, B, 2, TOLA, TOLB, ++ $ DUMMYK, DUMMYL, U, 2, V, 2, Q, 1, IW, RW, TAU, W, + $ INFO ) + CALL CHKXER( 'ZGGSVP', INFOT, NOUT, LERR, OK ) + NT = NT + 11 +@@ -518,11 +518,11 @@ + CALL CHKXER( 'ZGGRQF', INFOT, NOUT, LERR, OK ) + NT = NT + 6 + * +-* Test error exits for the ZGS, ZGV, ZGX, and ZXV paths. ++* Test error exits for the CGS, CGV, CGX, and CXV paths. + * +- ELSE IF( LSAMEN( 3, PATH, 'ZGS' ) .OR. +- $ LSAMEN( 3, PATH, 'ZGV' ) .OR. +- $ LSAMEN( 3, PATH, 'ZGX' ) .OR. LSAMEN( 3, PATH, 'ZXV' ) ) ++ ELSE IF( LSAMEN( 3, PATH, 'CGS' ) .OR. ++ $ LSAMEN( 3, PATH, 'CGV' ) .OR. ++ $ LSAMEN( 3, PATH, 'CGX' ) .OR. 
LSAMEN( 3, PATH, 'CXV' ) ) + $ THEN + * + * ZGGES +diff -uNr LAPACK.orig/TESTING/LIN/cerrqp.f LAPACK/TESTING/LIN/cerrqp.f +--- LAPACK.orig/TESTING/LIN/cerrqp.f Thu Nov 4 14:26:53 1999 ++++ LAPACK/TESTING/LIN/cerrqp.f Fri May 25 16:15:32 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK test routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* October 6, 2000 + * + * .. Scalar Arguments .. + CHARACTER*3 PATH +@@ -28,7 +28,7 @@ + * + * .. Parameters .. + INTEGER NMAX +- PARAMETER ( NMAX = 2 ) ++ PARAMETER ( NMAX = 3 ) + * .. + * .. Local Scalars .. + CHARACTER*2 C2 +@@ -98,10 +98,10 @@ + CALL CGEQP3( 1, -1, A, 1, IP, TAU, W, LW, RW, INFO ) + CALL CHKXER( 'CGEQP3', INFOT, NOUT, LERR, OK ) + INFOT = 4 +- CALL CGEQP3( 1, 1, A, 0, IP, TAU, W, LW, RW, INFO ) ++ CALL CGEQP3( 2, 3, A, 1, IP, TAU, W, LW, RW, INFO ) + CALL CHKXER( 'CGEQP3', INFOT, NOUT, LERR, OK ) + INFOT = 8 +- CALL CGEQP3( 2, 2, A, 2, IP, TAU, W, LW-1, RW, INFO ) ++ CALL CGEQP3( 2, 2, A, 2, IP, TAU, W, LW-10, RW, INFO ) + CALL CHKXER( 'CGEQP3', INFOT, NOUT, LERR, OK ) + END IF + * +diff -uNr LAPACK.orig/TESTING/LIN/derrqp.f LAPACK/TESTING/LIN/derrqp.f +--- LAPACK.orig/TESTING/LIN/derrqp.f Thu Nov 4 14:27:03 1999 ++++ LAPACK/TESTING/LIN/derrqp.f Fri May 25 16:15:28 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK test routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* October 6, 2000 + * + * .. Scalar Arguments .. + CHARACTER*3 PATH +@@ -13,7 +13,7 @@ + * Purpose + * ======= + * +-* DERRQP tests the error exits for DGEQPF and SGEQP3. ++* DERRQP tests the error exits for DGEQPF and DGEQP3. + * + * Arguments + * ========= +@@ -28,7 +28,7 @@ + * + * .. Parameters .. + INTEGER NMAX +- PARAMETER ( NMAX = 2 ) ++ PARAMETER ( NMAX = 3 ) + * .. + * .. Local Scalars .. 
+ CHARACTER*2 C2 +@@ -93,10 +93,10 @@ + CALL DGEQP3( 1, -1, A, 1, IP, TAU, W, LW, INFO ) + CALL CHKXER( 'DGEQP3', INFOT, NOUT, LERR, OK ) + INFOT = 4 +- CALL DGEQP3( 1, 2, A, 0, IP, TAU, W, LW, INFO ) ++ CALL DGEQP3( 2, 3, A, 1, IP, TAU, W, LW, INFO ) + CALL CHKXER( 'DGEQP3', INFOT, NOUT, LERR, OK ) + INFOT = 8 +- CALL DGEQP3( 2, 2, A, 2, IP, TAU, W, LW-1, INFO ) ++ CALL DGEQP3( 2, 2, A, 2, IP, TAU, W, LW-10, INFO ) + CALL CHKXER( 'DGEQP3', INFOT, NOUT, LERR, OK ) + END IF + * +diff -uNr LAPACK.orig/TESTING/LIN/serrqp.f LAPACK/TESTING/LIN/serrqp.f +--- LAPACK.orig/TESTING/LIN/serrqp.f Thu Nov 4 14:26:44 1999 ++++ LAPACK/TESTING/LIN/serrqp.f Fri May 25 16:15:23 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK test routine (version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* October 6, 2000 + * + * .. Scalar Arguments .. + CHARACTER*3 PATH +@@ -28,7 +28,7 @@ + * + * .. Parameters .. + INTEGER NMAX +- PARAMETER ( NMAX = 2 ) ++ PARAMETER ( NMAX = 3 ) + * .. + * .. Local Scalars .. + CHARACTER*2 C2 +@@ -93,10 +93,10 @@ + CALL SGEQP3( 1, -1, A, 1, IP, TAU, W, LW, INFO ) + CALL CHKXER( 'SGEQP3', INFOT, NOUT, LERR, OK ) + INFOT = 4 +- CALL SGEQP3( 1, 2, A, 0, IP, TAU, W, LW, INFO ) ++ CALL SGEQP3( 2, 3, A, 1, IP, TAU, W, LW, INFO ) + CALL CHKXER( 'SGEQP3', INFOT, NOUT, LERR, OK ) + INFOT = 8 +- CALL SGEQP3( 2, 2, A, 2, IP, TAU, W, LW-1, INFO ) ++ CALL SGEQP3( 2, 2, A, 2, IP, TAU, W, LW-10, INFO ) + CALL CHKXER( 'SGEQP3', INFOT, NOUT, LERR, OK ) + END IF + * +diff -uNr LAPACK.orig/TESTING/LIN/zerrqp.f LAPACK/TESTING/LIN/zerrqp.f +--- LAPACK.orig/TESTING/LIN/zerrqp.f Thu Nov 4 14:27:13 1999 ++++ LAPACK/TESTING/LIN/zerrqp.f Fri May 25 16:15:36 2001 +@@ -3,7 +3,7 @@ + * -- LAPACK test routine (version 3.0) -- + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* October 6, 2000 + * + * .. Scalar Arguments .. + CHARACTER*3 PATH +@@ -28,7 +28,7 @@ + * + * .. Parameters .. + INTEGER NMAX +- PARAMETER ( NMAX = 2 ) ++ PARAMETER ( NMAX = 3 ) + * .. + * .. Local Scalars .. + CHARACTER*2 C2 +@@ -98,10 +98,10 @@ + CALL ZGEQP3( 1, -1, A, 1, IP, TAU, W, LW, RW, INFO ) + CALL CHKXER( 'ZGEQP3', INFOT, NOUT, LERR, OK ) + INFOT = 4 +- CALL ZGEQP3( 1, 1, A, 0, IP, TAU, W, LW, RW, INFO ) ++ CALL ZGEQP3( 2, 3, A, 1, IP, TAU, W, LW, RW, INFO ) + CALL CHKXER( 'ZGEQP3', INFOT, NOUT, LERR, OK ) + INFOT = 8 +- CALL ZGEQP3( 2, 2, A, 2, IP, TAU, W, LW-1, RW, INFO ) ++ CALL ZGEQP3( 2, 2, A, 2, IP, TAU, W, LW-10, RW, INFO ) + CALL CHKXER( 'ZGEQP3', INFOT, NOUT, LERR, OK ) + END IF + * +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/cbdsqr.f LAPACK/TIMING/EIG/EIGSRC/cbdsqr.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/cbdsqr.f Thu Nov 4 14:28:26 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/cbdsqr.f Fri May 25 16:19:57 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO +@@ -26,14 +26,26 @@ + * Purpose + * ======= + * +-* CBDSQR computes the singular value decomposition (SVD) of a real +-* N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P' +-* denotes the transpose of P), where S is a diagonal matrix with +-* non-negative diagonal elements (the singular values of B), and Q +-* and P are orthogonal matrices. +-* +-* The routine computes S, and optionally computes U * Q, P' * VT, +-* or Q' * C, for given complex input matrices U, VT, and C. 
++* CBDSQR computes the singular values and, optionally, the right and/or ++* left singular vectors from the singular value decomposition (SVD) of ++* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit ++* zero-shift QR algorithm. The SVD of B has the form ++* ++* B = Q * S * P**H ++* ++* where S is the diagonal matrix of singular values, Q is an orthogonal ++* matrix of left singular vectors, and P is an orthogonal matrix of ++* right singular vectors. If left singular vectors are requested, this ++* subroutine actually returns U*Q instead of Q, and, if right singular ++* vectors are requested, this subroutine returns P**H*VT instead of ++* P**H, for given complex input matrices U and VT. When U and VT are ++* the unitary matrices that reduce a general matrix A to bidiagonal ++* form: A = U*B*VT, as computed by CGEBRD, then ++* ++* A = (U*Q) * S * (P**H*VT) ++* ++* is the SVD of A. Optionally, the subroutine may also compute Q**H*C ++* for a given complex input matrix C. + * + * See "Computing Small Singular Values of Bidiagonal Matrices With + * Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, +@@ -69,18 +81,17 @@ + * order. + * + * E (input/output) REAL array, dimension (N) +-* On entry, the elements of E contain the +-* offdiagonal elements of of the bidiagonal matrix whose SVD +-* is desired. On normal exit (INFO = 0), E is destroyed. +-* If the algorithm does not converge (INFO > 0), D and E ++* On entry, the N-1 offdiagonal elements of the bidiagonal ++* matrix B. ++* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E + * will contain the diagonal and superdiagonal elements of a + * bidiagonal matrix orthogonally equivalent to the one given + * as input. E(N) is used for workspace. + * + * VT (input/output) COMPLEX array, dimension (LDVT, NCVT) + * On entry, an N-by-NCVT matrix VT. +-* On exit, VT is overwritten by P' * VT. +-* VT is not referenced if NCVT = 0. ++* On exit, VT is overwritten by P**H * VT. 
++* Not referenced if NCVT = 0. + * + * LDVT (input) INTEGER + * The leading dimension of the array VT. +@@ -89,21 +100,22 @@ + * U (input/output) COMPLEX array, dimension (LDU, N) + * On entry, an NRU-by-N matrix U. + * On exit, U is overwritten by U * Q. +-* U is not referenced if NRU = 0. ++* Not referenced if NRU = 0. + * + * LDU (input) INTEGER + * The leading dimension of the array U. LDU >= max(1,NRU). + * + * C (input/output) COMPLEX array, dimension (LDC, NCC) + * On entry, an N-by-NCC matrix C. +-* On exit, C is overwritten by Q' * C. +-* C is not referenced if NCC = 0. ++* On exit, C is overwritten by Q**H * C. ++* Not referenced if NCC = 0. + * + * LDC (input) INTEGER + * The leading dimension of the array C. + * LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. + * +-* RWORK (workspace) REAL array, dimension (4*N) ++* RWORK (workspace) REAL array, dimension (2*N) ++* if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise + * + * INFO (output) INTEGER + * = 0: successful exit +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/cgghrd.f LAPACK/TIMING/EIG/EIGSRC/cgghrd.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/cgghrd.f Thu Nov 4 14:28:26 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/cgghrd.f Fri May 25 16:20:17 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ +@@ -33,16 +33,29 @@ + * + * CGGHRD reduces a pair of complex matrices (A,B) to generalized upper + * Hessenberg form using unitary transformations, where A is a +-* general matrix and B is upper triangular: Q' * A * Z = H and +-* Q' * B * Z = T, where H is upper Hessenberg, T is upper triangular, +-* and Q and Z are unitary, and ' means conjugate transpose. ++* general matrix and B is upper triangular. 
The form of the generalized ++* eigenvalue problem is ++* A*x = lambda*B*x, ++* and B is typically made upper triangular by computing its QR ++* factorization and moving the unitary matrix Q to the left side ++* of the equation. ++* ++* This subroutine simultaneously reduces A to a Hessenberg matrix H: ++* Q**H*A*Z = H ++* and transforms B to another upper triangular matrix T: ++* Q**H*B*Z = T ++* in order to reduce the problem to its standard form ++* H*y = lambda*T*y ++* where y = Z**H*x. + * + * The unitary matrices Q and Z are determined as products of Givens + * rotations. They may either be formed explicitly, or they may be + * postmultiplied into input matrices Q1 and Z1, so that +-* +-* Q1 * A * Z1' = (Q1*Q) * H * (Z1*Z)' +-* Q1 * B * Z1' = (Q1*Q) * T * (Z1*Z)' ++* Q1 * A * Z1**H = (Q1*Q) * H * (Z1*Z)**H ++* Q1 * B * Z1**H = (Q1*Q) * T * (Z1*Z)**H ++* If Q1 is the unitary matrix from the QR factorization of B in the ++* original equation A*x = lambda*B*x, then CGGHRD reduces the original ++* problem to generalized Hessenberg form. + * + * Arguments + * ========= +@@ -66,10 +79,11 @@ + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set +-* by a previous call to CGGBAL; otherwise they should be set +-* to 1 and N respectively. ++* ILO and IHI mark the rows and columns of A which are to be ++* reduced. It is assumed that A is already upper triangular ++* in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are ++* normally set by a previous call to CGGBAL; otherwise they ++* should be set to 1 and N respectively. + * 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. + * + * A (input/output) COMPLEX array, dimension (LDA, N) +@@ -83,33 +97,28 @@ + * + * B (input/output) COMPLEX array, dimension (LDB, N) + * On entry, the N-by-N upper triangular matrix B. +-* On exit, the upper triangular matrix T = Q' B Z. 
The ++* On exit, the upper triangular matrix T = Q**H B Z. The + * elements below the diagonal are set to zero. + * + * LDB (input) INTEGER + * The leading dimension of the array B. LDB >= max(1,N). + * + * Q (input/output) COMPLEX array, dimension (LDQ, N) +-* If COMPQ='N': Q is not referenced. +-* If COMPQ='I': on entry, Q need not be set, and on exit it +-* contains the unitary matrix Q, where Q' +-* is the product of the Givens transformations +-* which are applied to A and B on the left. +-* If COMPQ='V': on entry, Q must contain a unitary matrix +-* Q1, and on exit this is overwritten by Q1*Q. ++* On entry, if COMPQ = 'V', the unitary matrix Q1, typically ++* from the QR factorization of B. ++* On exit, if COMPQ='I', the unitary matrix Q, and if ++* COMPQ = 'V', the product Q1*Q. ++* Not referenced if COMPQ='N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. + * LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. + * + * Z (input/output) COMPLEX array, dimension (LDZ, N) +-* If COMPZ='N': Z is not referenced. +-* If COMPZ='I': on entry, Z need not be set, and on exit it +-* contains the unitary matrix Z, which is +-* the product of the Givens transformations +-* which are applied to A and B on the right. +-* If COMPZ='V': on entry, Z must contain a unitary matrix +-* Z1, and on exit this is overwritten by Z1*Z. ++* On entry, if COMPZ = 'V', the unitary matrix Z1. ++* On exit, if COMPZ='I', the unitary matrix Z, and if ++* COMPZ = 'V', the product Z1*Z. ++* Not referenced if COMPZ='N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. 
+diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/chgeqz.f LAPACK/TIMING/EIG/EIGSRC/chgeqz.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/chgeqz.f Thu Nov 4 14:28:26 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/chgeqz.f Fri May 25 16:20:35 2001 +@@ -1,20 +1,21 @@ +- SUBROUTINE CHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, A, LDA, B, LDB, ++ SUBROUTINE CHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, H, LDH, T, LDT, + $ ALPHA, BETA, Q, LDQ, Z, LDZ, WORK, LWORK, + $ RWORK, INFO ) + * + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ, JOB +- INTEGER IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, LWORK, N ++ INTEGER IHI, ILO, INFO, LDH, LDQ, LDT, LDZ, LWORK, N + * .. + * .. Array Arguments .. + REAL RWORK( * ) +- COMPLEX A( LDA, * ), ALPHA( * ), B( LDB, * ), +- $ BETA( * ), Q( LDQ, * ), WORK( * ), Z( LDZ, * ) ++ COMPLEX ALPHA( * ), BETA( * ), H( LDH, * ), ++ $ Q( LDQ, * ), T( LDT, * ), WORK( * ), ++ $ Z( LDZ, * ) + * .. + * + * ----------------------- Begin Timing Code ------------------------ +@@ -34,24 +35,44 @@ + * Purpose + * ======= + * +-* CHGEQZ implements a single-shift version of the QZ +-* method for finding the generalized eigenvalues w(i)=ALPHA(i)/BETA(i) +-* of the equation +-* +-* det( A - w(i) B ) = 0 +-* +-* If JOB='S', then the pair (A,B) is simultaneously +-* reduced to Schur form (i.e., A and B are both upper triangular) by +-* applying one unitary tranformation (usually called Q) on the left and +-* another (usually called Z) on the right. The diagonal elements of +-* A are then ALPHA(1),...,ALPHA(N), and of B are BETA(1),...,BETA(N). 
+-* +-* If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the unitary +-* transformations used to reduce (A,B) are accumulated into the arrays +-* Q and Z s.t.: +-* +-* Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)* +-* Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)* ++* CHGEQZ computes the eigenvalues of a complex matrix pair (H,T), ++* where H is an upper Hessenberg matrix and T is upper triangular, ++* using the single-shift QZ method. ++* Matrix pairs of this type are produced by the reduction to ++* generalized upper Hessenberg form of a complex matrix pair (A,B): ++* ++* A = Q1*H*Z1**H, B = Q1*T*Z1**H, ++* ++* as computed by CGGHRD. ++* ++* If JOB='S', then the Hessenberg-triangular pair (H,T) is ++* also reduced to generalized Schur form, ++* ++* H = Q*S*Z**H, T = Q*P*Z**H, ++* ++* where Q and Z are unitary matrices and S and P are upper triangular. ++* ++* Optionally, the unitary matrix Q from the generalized Schur ++* factorization may be postmultiplied into an input matrix Q1, and the ++* unitary matrix Z may be postmultiplied into an input matrix Z1. ++* If Q1 and Z1 are the unitary matrices from CGGHRD that reduced ++* the matrix pair (A,B) to generalized Hessenberg form, then the output ++* matrices Q1*Q and Z1*Z are the unitary factors from the generalized ++* Schur factorization of (A,B): ++* ++* A = (Q1*Q)*S*(Z1*Z)**H, B = (Q1*Q)*P*(Z1*Z)**H. ++* ++* To avoid overflow, eigenvalues of the matrix pair (H,T) ++* (equivalently, of (A,B)) are computed as a pair of complex values ++* (alpha,beta). If beta is nonzero, lambda = alpha / beta is an ++* eigenvalue of the generalized nonsymmetric eigenvalue problem (GNEP) ++* A*x = lambda*B*x ++* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the ++* alternate form of the GNEP ++* mu*A*y = B*y. ++* The values of alpha and beta for the i-th eigenvalue can be read ++* directly from the generalized Schur form: alpha = S(i,i), ++* beta = P(i,i). + * + * Ref: C.B. Moler & G.W. 
Stewart, "An Algorithm for Generalized Matrix + * Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), +@@ -61,83 +82,88 @@ + * ========= + * + * JOB (input) CHARACTER*1 +-* = 'E': compute only ALPHA and BETA. A and B will not +-* necessarily be put into generalized Schur form. +-* = 'S': put A and B into generalized Schur form, as well +-* as computing ALPHA and BETA. ++* = 'E': Compute eigenvalues only; ++* = 'S': Computer eigenvalues and the Schur form. + * + * COMPQ (input) CHARACTER*1 +-* = 'N': do not modify Q. +-* = 'V': multiply the array Q on the right by the conjugate +-* transpose of the unitary tranformation that is +-* applied to the left side of A and B to reduce them +-* to Schur form. +-* = 'I': like COMPQ='V', except that Q will be initialized to +-* the identity first. ++* = 'N': Left Schur vectors (Q) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Q ++* of left Schur vectors of (H,T) is returned; ++* = 'V': Q must contain a unitary matrix Q1 on entry and ++* the product Q1*Q is returned. + * + * COMPZ (input) CHARACTER*1 +-* = 'N': do not modify Z. +-* = 'V': multiply the array Z on the right by the unitary +-* tranformation that is applied to the right side of +-* A and B to reduce them to Schur form. +-* = 'I': like COMPZ='V', except that Z will be initialized to +-* the identity first. ++* = 'N': Right Schur vectors (Z) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Z ++* of right Schur vectors of (H,T) is returned; ++* = 'V': Z must contain a unitary matrix Z1 on entry and ++* the product Z1*Z is returned. + * + * N (input) INTEGER +-* The order of the matrices A, B, Q, and Z. N >= 0. ++* The order of the matrices H, T, Q, and Z. N >= 0. + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. +-* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. 
+-* +-* A (input/output) COMPLEX array, dimension (LDA, N) +-* On entry, the N-by-N upper Hessenberg matrix A. Elements +-* below the subdiagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to upper triangular form. +-* If JOB='E', then on exit A will have been destroyed. +-* +-* LDA (input) INTEGER +-* The leading dimension of the array A. LDA >= max( 1, N ). +-* +-* B (input/output) COMPLEX array, dimension (LDB, N) +-* On entry, the N-by-N upper triangular matrix B. Elements +-* below the diagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to upper triangular form. +-* If JOB='E', then on exit B will have been destroyed. ++* ILO and IHI mark the rows and columns of H which are in ++* Hessenberg form. It is assumed that A is already upper ++* triangular in rows and columns 1:ILO-1 and IHI+1:N. ++* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. ++* ++* H (input/output) COMPLEX array, dimension (LDH, N) ++* On entry, the N-by-N upper Hessenberg matrix H. ++* On exit, if JOB = 'S', H contains the upper triangular ++* matrix S from the generalized Schur factorization. ++* If JOB = 'E', the diagonal of H matches that of S, but ++* the rest of H is unspecified. ++* ++* LDH (input) INTEGER ++* The leading dimension of the array H. LDH >= max( 1, N ). ++* ++* T (input/output) COMPLEX array, dimension (LDT, N) ++* On entry, the N-by-N upper triangular matrix T. ++* On exit, if JOB = 'S', T contains the upper triangular ++* matrix P from the generalized Schur factorization. ++* If JOB = 'E', the diagonal of T matches that of P, but ++* the rest of T is unspecified. + * +-* LDB (input) INTEGER +-* The leading dimension of the array B. LDB >= max( 1, N ). ++* LDT (input) INTEGER ++* The leading dimension of the array T. LDT >= max( 1, N ). 
+ * + * ALPHA (output) COMPLEX array, dimension (N) +-* The diagonal elements of A when the pair (A,B) has been +-* reduced to Schur form. ALPHA(i)/BETA(i) i=1,...,N +-* are the generalized eigenvalues. ++* The complex scalars alpha that define the eigenvalues of ++* GNEP. ALPHA(i) = S(i,i) in the generalized Schur ++* factorization. + * + * BETA (output) COMPLEX array, dimension (N) +-* The diagonal elements of B when the pair (A,B) has been +-* reduced to Schur form. ALPHA(i)/BETA(i) i=1,...,N +-* are the generalized eigenvalues. A and B are normalized +-* so that BETA(1),...,BETA(N) are non-negative real numbers. ++* The real non-negative scalars beta that define the ++* eigenvalues of GNEP. BETA(i) = P(i,i) in the generalized ++* Schur factorization. ++* ++* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) ++* represent the j-th eigenvalue of the matrix pair (A,B), in ++* one of the forms lambda = alpha/beta or mu = beta/alpha. ++* Since either lambda or mu may overflow, they should not, ++* in general, be computed. + * + * Q (input/output) COMPLEX array, dimension (LDQ, N) +-* If COMPQ='N', then Q will not be referenced. +-* If COMPQ='V' or 'I', then the conjugate transpose of the +-* unitary transformations which are applied to A and B on +-* the left will be applied to the array Q on the right. ++* On entry, if COMPZ = 'V', the unitary matrix Q1 used in the ++* reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the unitary matrix of left Schur ++* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of ++* left Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. LDQ >= 1. + * If COMPQ='V' or 'I', then LDQ >= N. + * + * Z (input/output) COMPLEX array, dimension (LDZ, N) +-* If COMPZ='N', then Z will not be referenced. 
+-* If COMPZ='V' or 'I', then the unitary transformations which +-* are applied to A and B on the right will be applied to the +-* array Z on the right. ++* On entry, if COMPZ = 'V', the unitary matrix Z1 used in the ++* reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the unitary matrix of right Schur ++* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of ++* right Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. LDZ >= 1. +@@ -159,13 +185,12 @@ + * INFO (output) INTEGER + * = 0: successful exit + * < 0: if INFO = -i, the i-th argument had an illegal value +-* = 1,...,N: the QZ iteration did not converge. (A,B) is not ++* = 1,...,N: the QZ iteration did not converge. (H,T) is not + * in Schur form, but ALPHA(i) and BETA(i), + * i=INFO+1,...,N should be correct. +-* = N+1,...,2*N: the shift calculation failed. (A,B) is not ++* = N+1,...,2*N: the shift calculation failed. (H,T) is not + * in Schur form, but ALPHA(i) and BETA(i), + * i=INFO-N+1,...,N should be correct. +-* > 2*N: various "impossible" errors. + * + * Further Details + * =============== +@@ -192,7 +217,7 @@ + REAL ABSB, ANORM, ASCALE, ATOL, BNORM, BSCALE, BTOL, + $ C, OPST, SAFMIN, TEMP, TEMP2, TEMPR, ULP + COMPLEX ABI22, AD11, AD12, AD21, AD22, CTEMP, CTEMP2, +- $ CTEMP3, ESHIFT, RTDISC, S, SHIFT, SIGNBC, T, ++ $ CTEMP3, ESHIFT, RTDISC, S, SHIFT, SIGNBC, T1, + $ U12, X + * .. + * .. External Functions .. +@@ -278,9 +303,9 @@ + INFO = -5 + ELSE IF( IHI.GT.N .OR. IHI.LT.ILO-1 ) THEN + INFO = -6 +- ELSE IF( LDA.LT.N ) THEN ++ ELSE IF( LDH.LT.N ) THEN + INFO = -8 +- ELSE IF( LDB.LT.N ) THEN ++ ELSE IF( LDT.LT.N ) THEN + INFO = -10 + ELSE IF( LDQ.LT.1 .OR. ( ILQ .AND. 
LDQ.LT.N ) ) THEN + INFO = -14 +@@ -316,8 +341,8 @@ + IN = IHI + 1 - ILO + SAFMIN = SLAMCH( 'S' ) + ULP = SLAMCH( 'E' )*SLAMCH( 'B' ) +- ANORM = CLANHS( 'F', IN, A( ILO, ILO ), LDA, RWORK ) +- BNORM = CLANHS( 'F', IN, B( ILO, ILO ), LDB, RWORK ) ++ ANORM = CLANHS( 'F', IN, H( ILO, ILO ), LDH, RWORK ) ++ BNORM = CLANHS( 'F', IN, T( ILO, ILO ), LDT, RWORK ) + ATOL = MAX( SAFMIN, ULP*ANORM ) + BTOL = MAX( SAFMIN, ULP*BNORM ) + ASCALE = ONE / MAX( SAFMIN, ANORM ) +@@ -334,18 +359,18 @@ + * Set Eigenvalues IHI+1:N + * + DO 10 J = IHI + 1, N +- ABSB = ABS( B( J, J ) ) ++ ABSB = ABS( T( J, J ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = CONJG( B( J, J ) / ABSB ) +- B( J, J ) = ABSB ++ SIGNBC = CONJG( T( J, J ) / ABSB ) ++ T( J, J ) = ABSB + IF( ILSCHR ) THEN +- CALL CSCAL( J-1, SIGNBC, B( 1, J ), 1 ) +- CALL CSCAL( J, SIGNBC, A( 1, J ), 1 ) ++ CALL CSCAL( J-1, SIGNBC, T( 1, J ), 1 ) ++ CALL CSCAL( J, SIGNBC, H( 1, J ), 1 ) + * ----------------- Begin Timing Code --------------------- + OPST = OPST + REAL( 12*( J-1 ) ) + * ------------------ End Timing Code ---------------------- + ELSE +- A( J, J ) = A( J, J )*SIGNBC ++ H( J, J ) = H( J, J )*SIGNBC + END IF + IF( ILZ ) + $ CALL CSCAL( N, SIGNBC, Z( 1, J ), 1 ) +@@ -353,10 +378,10 @@ + OPST = OPST + REAL( 6*NZ+13 ) + * -------------------- End Timing Code ----------------------- + ELSE +- B( J, J ) = CZERO ++ T( J, J ) = CZERO + END IF +- ALPHA( J ) = A( J, J ) +- BETA( J ) = B( J, J ) ++ ALPHA( J ) = H( J, J ) ++ BETA( J ) = T( J, J ) + 10 CONTINUE + * + * If IHI < ILO, skip QZ steps +@@ -401,22 +426,22 @@ + * Split the matrix if possible. 
+ * + * Two tests: +-* 1: A(j,j-1)=0 or j=ILO +-* 2: B(j,j)=0 ++* 1: H(j,j-1)=0 or j=ILO ++* 2: T(j,j)=0 + * + * Special case: j=ILAST + * + IF( ILAST.EQ.ILO ) THEN + GO TO 60 + ELSE +- IF( ABS1( A( ILAST, ILAST-1 ) ).LE.ATOL ) THEN +- A( ILAST, ILAST-1 ) = CZERO ++ IF( ABS1( H( ILAST, ILAST-1 ) ).LE.ATOL ) THEN ++ H( ILAST, ILAST-1 ) = CZERO + GO TO 60 + END IF + END IF + * +- IF( ABS( B( ILAST, ILAST ) ).LE.BTOL ) THEN +- B( ILAST, ILAST ) = CZERO ++ IF( ABS( T( ILAST, ILAST ) ).LE.BTOL ) THEN ++ T( ILAST, ILAST ) = CZERO + GO TO 50 + END IF + * +@@ -424,30 +449,30 @@ + * + DO 40 J = ILAST - 1, ILO, -1 + * +-* Test 1: for A(j,j-1)=0 or j=ILO ++* Test 1: for H(j,j-1)=0 or j=ILO + * + IF( J.EQ.ILO ) THEN + ILAZRO = .TRUE. + ELSE +- IF( ABS1( A( J, J-1 ) ).LE.ATOL ) THEN +- A( J, J-1 ) = CZERO ++ IF( ABS1( H( J, J-1 ) ).LE.ATOL ) THEN ++ H( J, J-1 ) = CZERO + ILAZRO = .TRUE. + ELSE + ILAZRO = .FALSE. + END IF + END IF + * +-* Test 2: for B(j,j)=0 ++* Test 2: for T(j,j)=0 + * +- IF( ABS( B( J, J ) ).LT.BTOL ) THEN +- B( J, J ) = CZERO ++ IF( ABS( T( J, J ) ).LT.BTOL ) THEN ++ T( J, J ) = CZERO + * + * Test 1a: Check for 2 consecutive small subdiagonals in A + * + ILAZR2 = .FALSE. + IF( .NOT.ILAZRO ) THEN +- IF( ABS1( A( J, J-1 ) )*( ASCALE*ABS1( A( J+1, +- $ J ) ) ).LE.ABS1( A( J, J ) )*( ASCALE*ATOL ) ) ++ IF( ABS1( H( J, J-1 ) )*( ASCALE*ABS1( H( J+1, ++ $ J ) ) ).LE.ABS1( H( J, J ) )*( ASCALE*ATOL ) ) + $ ILAZR2 = .TRUE. + END IF + * +@@ -459,24 +484,24 @@ + * + IF( ILAZRO .OR. 
ILAZR2 ) THEN + DO 20 JCH = J, ILAST - 1 +- CTEMP = A( JCH, JCH ) +- CALL CLARTG( CTEMP, A( JCH+1, JCH ), C, S, +- $ A( JCH, JCH ) ) +- A( JCH+1, JCH ) = CZERO +- CALL CROT( ILASTM-JCH, A( JCH, JCH+1 ), LDA, +- $ A( JCH+1, JCH+1 ), LDA, C, S ) +- CALL CROT( ILASTM-JCH, B( JCH, JCH+1 ), LDB, +- $ B( JCH+1, JCH+1 ), LDB, C, S ) ++ CTEMP = H( JCH, JCH ) ++ CALL CLARTG( CTEMP, H( JCH+1, JCH ), C, S, ++ $ H( JCH, JCH ) ) ++ H( JCH+1, JCH ) = CZERO ++ CALL CROT( ILASTM-JCH, H( JCH, JCH+1 ), LDH, ++ $ H( JCH+1, JCH+1 ), LDH, C, S ) ++ CALL CROT( ILASTM-JCH, T( JCH, JCH+1 ), LDT, ++ $ T( JCH+1, JCH+1 ), LDT, C, S ) + IF( ILQ ) + $ CALL CROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, CONJG( S ) ) + IF( ILAZR2 ) +- $ A( JCH, JCH-1 ) = A( JCH, JCH-1 )*C ++ $ H( JCH, JCH-1 ) = H( JCH, JCH-1 )*C + ILAZR2 = .FALSE. + * --------------- Begin Timing Code ----------------- + OPST = OPST + REAL( 32+40*( ILASTM-JCH )+20*NQ ) + * ---------------- End Timing Code ------------------ +- IF( ABS1( B( JCH+1, JCH+1 ) ).GE.BTOL ) THEN ++ IF( ABS1( T( JCH+1, JCH+1 ) ).GE.BTOL ) THEN + IF( JCH+1.GE.ILAST ) THEN + GO TO 60 + ELSE +@@ -484,35 +509,35 @@ + GO TO 70 + END IF + END IF +- B( JCH+1, JCH+1 ) = CZERO ++ T( JCH+1, JCH+1 ) = CZERO + 20 CONTINUE + GO TO 50 + ELSE + * +-* Only test 2 passed -- chase the zero to B(ILAST,ILAST) +-* Then process as in the case B(ILAST,ILAST)=0 ++* Only test 2 passed -- chase the zero to T(ILAST,ILAST) ++* Then process as in the case T(ILAST,ILAST)=0 + * + DO 30 JCH = J, ILAST - 1 +- CTEMP = B( JCH, JCH+1 ) +- CALL CLARTG( CTEMP, B( JCH+1, JCH+1 ), C, S, +- $ B( JCH, JCH+1 ) ) +- B( JCH+1, JCH+1 ) = CZERO ++ CTEMP = T( JCH, JCH+1 ) ++ CALL CLARTG( CTEMP, T( JCH+1, JCH+1 ), C, S, ++ $ T( JCH, JCH+1 ) ) ++ T( JCH+1, JCH+1 ) = CZERO + IF( JCH.LT.ILASTM-1 ) +- $ CALL CROT( ILASTM-JCH-1, B( JCH, JCH+2 ), LDB, +- $ B( JCH+1, JCH+2 ), LDB, C, S ) +- CALL CROT( ILASTM-JCH+2, A( JCH, JCH-1 ), LDA, +- $ A( JCH+1, JCH-1 ), LDA, C, S ) ++ $ CALL CROT( ILASTM-JCH-1, T( 
JCH, JCH+2 ), LDT, ++ $ T( JCH+1, JCH+2 ), LDT, C, S ) ++ CALL CROT( ILASTM-JCH+2, H( JCH, JCH-1 ), LDH, ++ $ H( JCH+1, JCH-1 ), LDH, C, S ) + IF( ILQ ) + $ CALL CROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, CONJG( S ) ) +- CTEMP = A( JCH+1, JCH ) +- CALL CLARTG( CTEMP, A( JCH+1, JCH-1 ), C, S, +- $ A( JCH+1, JCH ) ) +- A( JCH+1, JCH-1 ) = CZERO +- CALL CROT( JCH+1-IFRSTM, A( IFRSTM, JCH ), 1, +- $ A( IFRSTM, JCH-1 ), 1, C, S ) +- CALL CROT( JCH-IFRSTM, B( IFRSTM, JCH ), 1, +- $ B( IFRSTM, JCH-1 ), 1, C, S ) ++ CTEMP = H( JCH+1, JCH ) ++ CALL CLARTG( CTEMP, H( JCH+1, JCH-1 ), C, S, ++ $ H( JCH+1, JCH ) ) ++ H( JCH+1, JCH-1 ) = CZERO ++ CALL CROT( JCH+1-IFRSTM, H( IFRSTM, JCH ), 1, ++ $ H( IFRSTM, JCH-1 ), 1, C, S ) ++ CALL CROT( JCH-IFRSTM, T( IFRSTM, JCH ), 1, ++ $ T( IFRSTM, JCH-1 ), 1, C, S ) + IF( ILZ ) + $ CALL CROT( N, Z( 1, JCH ), 1, Z( 1, JCH-1 ), 1, + $ C, S ) +@@ -542,40 +567,40 @@ + INFO = 2*N + 1 + GO TO 210 + * +-* B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a ++* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a + * 1x1 block. 
+ * + 50 CONTINUE +- CTEMP = A( ILAST, ILAST ) +- CALL CLARTG( CTEMP, A( ILAST, ILAST-1 ), C, S, +- $ A( ILAST, ILAST ) ) +- A( ILAST, ILAST-1 ) = CZERO +- CALL CROT( ILAST-IFRSTM, A( IFRSTM, ILAST ), 1, +- $ A( IFRSTM, ILAST-1 ), 1, C, S ) +- CALL CROT( ILAST-IFRSTM, B( IFRSTM, ILAST ), 1, +- $ B( IFRSTM, ILAST-1 ), 1, C, S ) ++ CTEMP = H( ILAST, ILAST ) ++ CALL CLARTG( CTEMP, H( ILAST, ILAST-1 ), C, S, ++ $ H( ILAST, ILAST ) ) ++ H( ILAST, ILAST-1 ) = CZERO ++ CALL CROT( ILAST-IFRSTM, H( IFRSTM, ILAST ), 1, ++ $ H( IFRSTM, ILAST-1 ), 1, C, S ) ++ CALL CROT( ILAST-IFRSTM, T( IFRSTM, ILAST ), 1, ++ $ T( IFRSTM, ILAST-1 ), 1, C, S ) + IF( ILZ ) + $ CALL CROT( N, Z( 1, ILAST ), 1, Z( 1, ILAST-1 ), 1, C, S ) + * --------------------- Begin Timing Code ----------------------- + OPST = OPST + REAL( 32+40*( ILAST-IFRSTM )+20*NZ ) + * ---------------------- End Timing Code ------------------------ + * +-* A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA ++* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA + * + 60 CONTINUE +- ABSB = ABS( B( ILAST, ILAST ) ) ++ ABSB = ABS( T( ILAST, ILAST ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = CONJG( B( ILAST, ILAST ) / ABSB ) +- B( ILAST, ILAST ) = ABSB ++ SIGNBC = CONJG( T( ILAST, ILAST ) / ABSB ) ++ T( ILAST, ILAST ) = ABSB + IF( ILSCHR ) THEN +- CALL CSCAL( ILAST-IFRSTM, SIGNBC, B( IFRSTM, ILAST ), 1 ) +- CALL CSCAL( ILAST+1-IFRSTM, SIGNBC, A( IFRSTM, ILAST ), ++ CALL CSCAL( ILAST-IFRSTM, SIGNBC, T( IFRSTM, ILAST ), 1 ) ++ CALL CSCAL( ILAST+1-IFRSTM, SIGNBC, H( IFRSTM, ILAST ), + $ 1 ) + * ----------------- Begin Timing Code --------------------- + OPST = OPST + REAL( 12*( ILAST-IFRSTM ) ) + * ------------------ End Timing Code ---------------------- + ELSE +- A( ILAST, ILAST ) = A( ILAST, ILAST )*SIGNBC ++ H( ILAST, ILAST ) = H( ILAST, ILAST )*SIGNBC + END IF + IF( ILZ ) + $ CALL CSCAL( N, SIGNBC, Z( 1, ILAST ), 1 ) +@@ -583,10 +608,10 @@ + OPST = OPST + REAL( 6*NZ+13 ) + * -------------------- End Timing Code 
----------------------- + ELSE +- B( ILAST, ILAST ) = CZERO ++ T( ILAST, ILAST ) = CZERO + END IF +- ALPHA( ILAST ) = A( ILAST, ILAST ) +- BETA( ILAST ) = B( ILAST, ILAST ) ++ ALPHA( ILAST ) = H( ILAST, ILAST ) ++ BETA( ILAST ) = T( ILAST, ILAST ) + * + * Go to next block -- exit if finished. + * +@@ -619,7 +644,7 @@ + * Compute the Shift. + * + * At this point, IFIRST < ILAST, and the diagonal elements of +-* B(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in ++* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in + * magnitude) + * + IF( ( IITER / 10 )*10.NE.IITER ) THEN +@@ -631,26 +656,26 @@ + * We factor B as U*D, where U has unit diagonals, and + * compute (A*inv(D))*inv(U). + * +- U12 = ( BSCALE*B( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- AD11 = ( ASCALE*A( ILAST-1, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD21 = ( ASCALE*A( ILAST, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD12 = ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- AD22 = ( ASCALE*A( ILAST, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) ++ U12 = ( BSCALE*T( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD11 = ( ASCALE*H( ILAST-1, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD21 = ( ASCALE*H( ILAST, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD12 = ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD22 = ( ASCALE*H( ILAST, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) + ABI22 = AD22 - U12*AD21 + * +- T = HALF*( AD11+ABI22 ) +- RTDISC = SQRT( T**2+AD12*AD21-AD11*AD22 ) +- TEMP = REAL( T-ABI22 )*REAL( RTDISC ) + +- $ AIMAG( T-ABI22 )*AIMAG( RTDISC ) ++ T1 = HALF*( AD11+ABI22 ) ++ RTDISC = SQRT( T1**2+AD12*AD21-AD11*AD22 ) ++ TEMP = REAL( T1-ABI22 )*REAL( RTDISC ) + ++ $ AIMAG( T1-ABI22 )*AIMAG( RTDISC ) + IF( TEMP.LE.ZERO ) THEN +- SHIFT = T + RTDISC ++ SHIFT = T1 + RTDISC + ELSE +- SHIFT = T - RTDISC ++ SHIFT = T1 - RTDISC + END IF + * + * ------------------- Begin Timing 
Code ---------------------- +@@ -661,8 +686,8 @@ + * + * Exceptional shift. Chosen for no particularly good reason. + * +- ESHIFT = ESHIFT + CONJG( ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) ) ++ ESHIFT = ESHIFT + CONJG( ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ) + SHIFT = ESHIFT + * + * ------------------- Begin Timing Code ---------------------- +@@ -675,21 +700,21 @@ + * + DO 80 J = ILAST - 1, IFIRST + 1, -1 + ISTART = J +- CTEMP = ASCALE*A( J, J ) - SHIFT*( BSCALE*B( J, J ) ) ++ CTEMP = ASCALE*H( J, J ) - SHIFT*( BSCALE*T( J, J ) ) + TEMP = ABS1( CTEMP ) +- TEMP2 = ASCALE*ABS1( A( J+1, J ) ) ++ TEMP2 = ASCALE*ABS1( H( J+1, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( ABS1( A( J, J-1 ) )*TEMP2.LE.TEMP*ATOL ) ++ IF( ABS1( H( J, J-1 ) )*TEMP2.LE.TEMP*ATOL ) + $ GO TO 90 + 80 CONTINUE + * + ISTART = IFIRST +- CTEMP = ASCALE*A( IFIRST, IFIRST ) - +- $ SHIFT*( BSCALE*B( IFIRST, IFIRST ) ) ++ CTEMP = ASCALE*H( IFIRST, IFIRST ) - ++ $ SHIFT*( BSCALE*T( IFIRST, IFIRST ) ) + * + * --------------------- Begin Timing Code ----------------------- + OPST = OPST - REAL( 6 ) +@@ -701,7 +726,7 @@ + * + * Initial Q + * +- CTEMP2 = ASCALE*A( ISTART+1, ISTART ) ++ CTEMP2 = ASCALE*H( ISTART+1, ISTART ) + * + * --------------------- Begin Timing Code ----------------------- + OPST = OPST + REAL( 2+( ILAST-ISTART )*18 ) +@@ -713,18 +738,18 @@ + * + DO 150 J = ISTART, ILAST - 1 + IF( J.GT.ISTART ) THEN +- CTEMP = A( J, J-1 ) +- CALL CLARTG( CTEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = CZERO ++ CTEMP = H( J, J-1 ) ++ CALL CLARTG( CTEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = CZERO + END IF + * + DO 100 JC = J, ILASTM +- CTEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -CONJG( S )*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = CTEMP +- CTEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, JC ) = -CONJG( 
S )*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = CTEMP2 ++ CTEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -CONJG( S )*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = CTEMP ++ CTEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -CONJG( S )*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = CTEMP2 + 100 CONTINUE + IF( ILQ ) THEN + DO 110 JR = 1, N +@@ -734,19 +759,19 @@ + 110 CONTINUE + END IF + * +- CTEMP = B( J+1, J+1 ) +- CALL CLARTG( CTEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = CZERO ++ CTEMP = T( J+1, J+1 ) ++ CALL CLARTG( CTEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = CZERO + * + DO 120 JR = IFRSTM, MIN( J+2, ILAST ) +- CTEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -CONJG( S )*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = CTEMP ++ CTEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -CONJG( S )*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = CTEMP + 120 CONTINUE + DO 130 JR = IFRSTM, J +- CTEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -CONJG( S )*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = CTEMP ++ CTEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -CONJG( S )*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = CTEMP + 130 CONTINUE + IF( ILZ ) THEN + DO 140 JR = 1, N +@@ -792,18 +817,18 @@ + * Set Eigenvalues 1:ILO-1 + * + DO 200 J = 1, ILO - 1 +- ABSB = ABS( B( J, J ) ) ++ ABSB = ABS( T( J, J ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = CONJG( B( J, J ) / ABSB ) +- B( J, J ) = ABSB ++ SIGNBC = CONJG( T( J, J ) / ABSB ) ++ T( J, J ) = ABSB + IF( ILSCHR ) THEN +- CALL CSCAL( J-1, SIGNBC, B( 1, J ), 1 ) +- CALL CSCAL( J, SIGNBC, A( 1, J ), 1 ) ++ CALL CSCAL( J-1, SIGNBC, T( 1, J ), 1 ) ++ CALL CSCAL( J, SIGNBC, H( 1, J ), 1 ) + * ----------------- Begin Timing Code --------------------- + OPST = OPST + REAL( 12*( J-1 ) ) + * ------------------ End Timing Code ---------------------- + ELSE +- A( J, J ) = A( J, J )*SIGNBC ++ H( J, J ) = H( J, J )*SIGNBC + END IF + IF( ILZ ) + $ CALL CSCAL( N, SIGNBC, Z( 1, J ), 1 ) +@@ 
-811,10 +836,10 @@ + OPST = OPST + REAL( 6*NZ+13 ) + * -------------------- End Timing Code ----------------------- + ELSE +- B( J, J ) = CZERO ++ T( J, J ) = CZERO + END IF +- ALPHA( J ) = A( J, J ) +- BETA( J ) = B( J, J ) ++ ALPHA( J ) = H( J, J ) ++ BETA( J ) = T( J, J ) + 200 CONTINUE + * + * Normal Termination +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/ctgevc.f LAPACK/TIMING/EIG/EIGSRC/ctgevc.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/ctgevc.f Thu Nov 4 14:28:30 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/ctgevc.f Fri May 25 16:20:48 2001 +@@ -1,19 +1,19 @@ +- SUBROUTINE CTGEVC( SIDE, HOWMNY, SELECT, N, A, LDA, B, LDB, VL, ++ SUBROUTINE CTGEVC( SIDE, HOWMNY, SELECT, N, S, LDS, P, LDP, VL, + $ LDVL, VR, LDVR, MM, M, WORK, RWORK, INFO ) + * + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 4, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +- INTEGER INFO, LDA, LDB, LDVL, LDVR, M, MM, N ++ INTEGER INFO, LDP, LDS, LDVL, LDVR, M, MM, N + * .. + * .. Array Arguments .. + LOGICAL SELECT( * ) + REAL RWORK( * ) +- COMPLEX A( LDA, * ), B( LDB, * ), VL( LDVL, * ), ++ COMPLEX P( LDP, * ), S( LDS, * ), VL( LDVL, * ), + $ VR( LDVR, * ), WORK( * ) + * .. + * +@@ -34,28 +34,30 @@ + * Purpose + * ======= + * +-* CTGEVC computes some or all of the right and/or left generalized +-* eigenvectors of a pair of complex upper triangular matrices (A,B). +-* +-* The right generalized eigenvector x and the left generalized +-* eigenvector y of (A,B) corresponding to a generalized eigenvalue +-* w are defined by: +-* +-* (A - wB) * x = 0 and y**H * (A - wB) = 0 +-* ++* CTGEVC computes some or all of the right and/or left eigenvectors of ++* a pair of complex matrices (S,P), where S and P are upper triangular. 
++* Matrix pairs of this type are produced by the generalized Schur ++* factorization of a complex matrix pair (A,B): ++* ++* A = Q*S*Z**H, B = Q*P*Z**H ++* ++* as computed by CGGHRD + CHGEQZ. ++* ++* The right eigenvector x and the left eigenvector y of (S,P) ++* corresponding to an eigenvalue w are defined by: ++* ++* S*x = w*P*x, (y**H)*S = w*(y**H)*P, ++* + * where y**H denotes the conjugate tranpose of y. +-* +-* If an eigenvalue w is determined by zero diagonal elements of both A +-* and B, a unit vector is returned as the corresponding eigenvector. +-* +-* If all eigenvectors are requested, the routine may either return +-* the matrices X and/or Y of right or left eigenvectors of (A,B), or +-* the products Z*X and/or Q*Y, where Z and Q are input unitary +-* matrices. If (A,B) was obtained from the generalized Schur +-* factorization of an original pair of matrices +-* (A0,B0) = (Q*A*Z**H,Q*B*Z**H), +-* then Z*X and Q*Y are the matrices of right or left eigenvectors of +-* A. ++* The eigenvalues are not input to this routine, but are computed ++* directly from the diagonal elements of S and P. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of (S,P), or the products Z*X and/or Q*Y, ++* where Z and Q are input matrices. ++* If Q and Z are the unitary factors from the generalized Schur ++* factorization of a matrix pair (A,B), then Z*X and Q*Y ++* are the matrices of right and left eigenvectors of (A,B). + * + * Arguments + * ========= +@@ -67,70 +69,69 @@ + * + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; +-* = 'B': compute all right and/or left eigenvectors, and +-* backtransform them using the input matrices supplied +-* in VR and/or VL; ++* = 'B': compute all right and/or left eigenvectors, ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, + * specified by the logical array SELECT. 
+ * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY='S', SELECT specifies the eigenvectors to be +-* computed. +-* If HOWMNY='A' or 'B', SELECT is not referenced. +-* To select the eigenvector corresponding to the j-th +-* eigenvalue, SELECT(j) must be set to .TRUE.. ++* computed. The eigenvector corresponding to the j-th ++* eigenvalue is computed if SELECT(j) = .TRUE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER +-* The order of the matrices A and B. N >= 0. +-* +-* A (input) COMPLEX array, dimension (LDA,N) +-* The upper triangular matrix A. +-* +-* LDA (input) INTEGER +-* The leading dimension of array A. LDA >= max(1,N). ++* The order of the matrices S and P. N >= 0. + * +-* B (input) COMPLEX array, dimension (LDB,N) +-* The upper triangular matrix B. B must have real diagonal +-* elements. ++* S (input) COMPLEX array, dimension (LDS,N) ++* The upper triangular matrix S from a generalized Schur ++* factorization, as computed by CHGEQZ. ++* ++* LDS (input) INTEGER ++* The leading dimension of array S. LDS >= max(1,N). ++* ++* P (input) COMPLEX array, dimension (LDP,N) ++* The upper triangular matrix P from a generalized Schur ++* factorization, as computed by CHGEQZ. P must have real ++* diagonal elements. + * +-* LDB (input) INTEGER +-* The leading dimension of array B. LDB >= max(1,N). ++* LDP (input) INTEGER ++* The leading dimension of array P. LDP >= max(1,N). + * + * VL (input/output) COMPLEX array, dimension (LDVL,MM) + * On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must + * contain an N-by-N matrix Q (usually the unitary matrix Q + * of left Schur vectors returned by CHGEQZ). 
+ * On exit, if SIDE = 'L' or 'B', VL contains: +-* if HOWMNY = 'A', the matrix Y of left eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Q*Y; +-* if HOWMNY = 'S', the left eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the left eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VL, in the same order as their eigenvalues. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of array VL. +-* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'l' or 'B' or 'b', LDVL >= N. + * + * VR (input/output) COMPLEX array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must + * contain an N-by-N matrix Q (usually the unitary matrix Z + * of right Schur vectors returned by CHGEQZ). + * On exit, if SIDE = 'R' or 'B', VR contains: +-* if HOWMNY = 'A', the matrix X of right eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Z*X; +-* if HOWMNY = 'S', the right eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the right eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VR, in the same order as their eigenvalues. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. + * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. +-* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. + * + * MM (input) INTEGER +-* The leading dimension of the array VR. +-* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The number of columns in the arrays VL and/or VR. MM >= M. 
+ * + * M (output) INTEGER + * The number of columns in the arrays VL and/or VR actually +@@ -194,7 +195,7 @@ + IHWMNY = 2 + ILALL = .FALSE. + ILBACK = .FALSE. +- ELSE IF( LSAME( HOWMNY, 'B' ) .OR. LSAME( HOWMNY, 'T' ) ) THEN ++ ELSE IF( LSAME( HOWMNY, 'B' ) ) THEN + IHWMNY = 3 + ILALL = .TRUE. + ILBACK = .TRUE. +@@ -225,9 +226,9 @@ + INFO = -2 + ELSE IF( N.LT.0 ) THEN + INFO = -4 +- ELSE IF( LDA.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDS.LT.MAX( 1, N ) ) THEN + INFO = -6 +- ELSE IF( LDB.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDP.LT.MAX( 1, N ) ) THEN + INFO = -8 + END IF + IF( INFO.NE.0 ) THEN +@@ -251,7 +252,7 @@ + * + ILBBAD = .FALSE. + DO 20 J = 1, N +- IF( AIMAG( B( J, J ) ).NE.ZERO ) ++ IF( AIMAG( P( J, J ) ).NE.ZERO ) + $ ILBBAD = .TRUE. + 20 CONTINUE + * +@@ -289,19 +290,19 @@ + * part of A and B to check for possible overflow in the triangular + * solver. + * +- ANORM = ABS1( A( 1, 1 ) ) +- BNORM = ABS1( B( 1, 1 ) ) ++ ANORM = ABS1( S( 1, 1 ) ) ++ BNORM = ABS1( P( 1, 1 ) ) + RWORK( 1 ) = ZERO + RWORK( N+1 ) = ZERO + DO 40 J = 2, N + RWORK( J ) = ZERO + RWORK( N+J ) = ZERO + DO 30 I = 1, J - 1 +- RWORK( J ) = RWORK( J ) + ABS1( A( I, J ) ) +- RWORK( N+J ) = RWORK( N+J ) + ABS1( B( I, J ) ) ++ RWORK( J ) = RWORK( J ) + ABS1( S( I, J ) ) ++ RWORK( N+J ) = RWORK( N+J ) + ABS1( P( I, J ) ) + 30 CONTINUE +- ANORM = MAX( ANORM, RWORK( J )+ABS1( A( J, J ) ) ) +- BNORM = MAX( BNORM, RWORK( N+J )+ABS1( B( J, J ) ) ) ++ ANORM = MAX( ANORM, RWORK( J )+ABS1( S( J, J ) ) ) ++ BNORM = MAX( BNORM, RWORK( N+J )+ABS1( P( J, J ) ) ) + 40 CONTINUE + * + ASCALE = ONE / MAX( ANORM, SAFMIN ) +@@ -326,8 +327,8 @@ + IF( ILCOMP ) THEN + IEIG = IEIG + 1 + * +- IF( ABS1( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( REAL( B( JE, JE ) ) ).LE.SAFMIN ) THEN ++ IF( ABS1( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( REAL( P( JE, JE ) ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- return unit eigenvector + * +@@ -343,10 +344,10 @@ + * H + * y ( a A - b B ) = 0 + * +- TEMP = ONE / MAX( ABS1( A( JE, JE ) )*ASCALE, +- $ ABS( REAL( B( JE, JE ) ) )*BSCALE, SAFMIN ) +- SALPHA = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*REAL( B( JE, JE ) ) )*BSCALE ++ TEMP = ONE / MAX( ABS1( S( JE, JE ) )*ASCALE, ++ $ ABS( REAL( P( JE, JE ) ) )*BSCALE, SAFMIN ) ++ SALPHA = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*REAL( P( JE, JE ) ) )*BSCALE + ACOEFF = SBETA*ASCALE + BCOEFF = SALPHA*BSCALE + * +@@ -403,7 +404,7 @@ + * + * Compute + * j-1 +-* SUM = sum conjg( a*A(k,j) - b*B(k,j) )*x(k) ++* SUM = sum conjg( a*S(k,j) - b*P(k,j) )*x(k) + * k=je + * (Scale if necessary) + * +@@ -422,16 +423,16 @@ + SUMB = CZERO + * + DO 80 JR = JE, J - 1 +- SUMA = SUMA + CONJG( A( JR, J ) )*WORK( JR ) +- SUMB = SUMB + CONJG( B( JR, J ) )*WORK( JR ) ++ SUMA = SUMA + CONJG( S( JR, J ) )*WORK( JR ) ++ SUMB = SUMB + CONJG( P( JR, J ) )*WORK( JR ) + 80 CONTINUE + SUM = ACOEFF*SUMA - CONJG( BCOEFF )*SUMB + * +-* Form x(j) = - SUM / conjg( a*A(j,j) - b*B(j,j) ) ++* Form x(j) = - SUM / conjg( a*S(j,j) - b*P(j,j) ) + * + * with scaling and perturbation of the denominator + * +- D = CONJG( ACOEFF*A( J, J )-BCOEFF*B( J, J ) ) ++ D = CONJG( ACOEFF*S( J, J )-BCOEFF*P( J, J ) ) + IF( ABS1( D ).LE.DMIN ) + $ D = CMPLX( DMIN ) + * +@@ -511,8 +512,8 @@ + IF( ILCOMP ) THEN + IEIG = IEIG - 1 + * +- IF( ABS1( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( REAL( B( JE, JE ) ) ).LE.SAFMIN ) THEN ++ IF( ABS1( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( REAL( P( JE, JE ) ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- return unit eigenvector + * +@@ -528,10 +529,10 @@ + * + * ( a A - b B ) x = 0 + * +- TEMP = ONE / MAX( ABS1( A( JE, JE ) )*ASCALE, +- $ ABS( REAL( B( JE, JE ) ) )*BSCALE, SAFMIN ) +- SALPHA = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*REAL( B( JE, JE ) ) )*BSCALE ++ TEMP = ONE / MAX( ABS1( S( JE, JE ) )*ASCALE, ++ $ ABS( REAL( P( JE, JE ) ) )*BSCALE, SAFMIN ) ++ SALPHA = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*REAL( P( JE, JE ) ) )*BSCALE + ACOEFF = SBETA*ASCALE + BCOEFF = SALPHA*BSCALE + * +@@ -584,7 +585,7 @@ + * WORK(j+1:JE) contains x + * + DO 170 JR = 1, JE - 1 +- WORK( JR ) = ACOEFF*A( JR, JE ) - BCOEFF*B( JR, JE ) ++ WORK( JR ) = ACOEFF*S( JR, JE ) - BCOEFF*P( JR, JE ) + 170 CONTINUE + WORK( JE ) = CONE + * +@@ -593,7 +594,7 @@ + * Form x(j) := - w(j) / d + * with scaling and perturbation of the denominator + * +- D = ACOEFF*A( J, J ) - BCOEFF*B( J, J ) ++ D = ACOEFF*S( J, J ) - BCOEFF*P( J, J ) + IF( ABS1( D ).LE.DMIN ) + $ D = CMPLX( DMIN ) + * +@@ -615,7 +616,7 @@ + * + IF( J.GT.1 ) THEN + * +-* w = w + x(j)*(a A(*,j) - b B(*,j) ) with scaling ++* w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling + * + IF( ABS1( WORK( J ) ).GT.ONE ) THEN + TEMP = ONE / ABS1( WORK( J ) ) +@@ -635,8 +636,8 @@ + CA = ACOEFF*WORK( J ) + CB = BCOEFF*WORK( J ) + DO 200 JR = 1, J - 1 +- WORK( JR ) = WORK( JR ) + CA*A( JR, J ) - +- $ CB*B( JR, J ) ++ WORK( JR ) = WORK( JR ) + CA*S( JR, J ) - ++ $ CB*P( JR, J ) + 200 CONTINUE + END IF + 210 CONTINUE +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/ctrevc.f LAPACK/TIMING/EIG/EIGSRC/ctrevc.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/ctrevc.f Thu Nov 4 14:28:30 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/ctrevc.f Fri May 25 16:21:06 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 7, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +@@ -31,20 +31,23 @@ + * + * CTREVC computes some or all of the right and/or left eigenvectors of + * a complex upper triangular matrix T. +-* ++* Matrices of this type are produced by the Schur factorization of ++* a complex general matrix: A = Q*T*Q**H, as computed by CHSEQR. ++* + * The right eigenvector x and the left eigenvector y of T corresponding + * to an eigenvalue w are defined by: +-* +-* T*x = w*x, y'*T = w*y' +-* +-* where y' denotes the conjugate transpose of the vector y. +-* +-* If all eigenvectors are requested, the routine may either return the +-* matrices X and/or Y of right or left eigenvectors of T, or the +-* products Q*X and/or Q*Y, where Q is an input unitary +-* matrix. If T was obtained from the Schur factorization of an +-* original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of +-* right or left eigenvectors of A. ++* ++* T*x = w*x, (y**H)*T = w*(y**H) ++* ++* where y**H denotes the conjugate transpose of the vector y. ++* The eigenvalues are not input to this routine, but are read directly ++* from the diagonal of T. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an ++* input matrix. If Q is the unitary factor that reduces a matrix A to ++* Schur form T, then Q*X and Q*Y are the matrices of right and left ++* eigenvectors of A. 
+ * + * Arguments + * ========= +@@ -57,17 +60,17 @@ + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; + * = 'B': compute all right and/or left eigenvectors, +-* and backtransform them using the input matrices +-* supplied in VR and/or VL; ++* backtransformed using the matrices supplied in ++* VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, +-* specified by the logical array SELECT. ++* as indicated by the logical array SELECT. + * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY = 'S', SELECT specifies the eigenvectors to be + * computed. +-* If HOWMNY = 'A' or 'B', SELECT is not referenced. +-* To select the eigenvector corresponding to the j-th +-* eigenvalue, SELECT(j) must be set to .TRUE.. ++* The eigenvector corresponding to the j-th eigenvalue is ++* computed if SELECT(j) = .TRUE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER + * The order of the matrix T. N >= 0. +@@ -85,19 +88,16 @@ + * Schur vectors returned by CHSEQR). + * On exit, if SIDE = 'L' or 'B', VL contains: + * if HOWMNY = 'A', the matrix Y of left eigenvectors of T; +-* VL is lower triangular. The i-th column +-* VL(i) of VL is the eigenvector corresponding +-* to T(i,i). + * if HOWMNY = 'B', the matrix Q*Y; + * if HOWMNY = 'S', the left eigenvectors of T specified by + * SELECT, stored consecutively in the columns + * of VL, in the same order as their + * eigenvalues. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of the array VL. LDVL >= max(1,N) if +-* SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of the array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. + * + * VR (input/output) COMPLEX array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +@@ -105,19 +105,16 @@ + * Schur vectors returned by CHSEQR). 
+ * On exit, if SIDE = 'R' or 'B', VR contains: + * if HOWMNY = 'A', the matrix X of right eigenvectors of T; +-* VR is upper triangular. The i-th column +-* VR(i) of VR is the eigenvector corresponding +-* to T(i,i). + * if HOWMNY = 'B', the matrix Q*X; + * if HOWMNY = 'S', the right eigenvectors of T specified by + * SELECT, stored consecutively in the columns + * of VR, in the same order as their + * eigenvalues. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. + * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. LDVR >= max(1,N) if +-* SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B'; LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/dbdsqr.f LAPACK/TIMING/EIG/EIGSRC/dbdsqr.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/dbdsqr.f Thu Nov 4 14:28:31 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/dbdsqr.f Fri May 25 16:19:53 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO +@@ -26,14 +26,26 @@ + * Purpose + * ======= + * +-* DBDSQR computes the singular value decomposition (SVD) of a real +-* N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P' +-* denotes the transpose of P), where S is a diagonal matrix with +-* non-negative diagonal elements (the singular values of B), and Q +-* and P are orthogonal matrices. ++* DBDSQR computes the singular values and, optionally, the right and/or ++* left singular vectors from the singular value decomposition (SVD) of ++* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit ++* zero-shift QR algorithm. 
The SVD of B has the form ++* ++* B = Q * S * P**T ++* ++* where S is the diagonal matrix of singular values, Q is an orthogonal ++* matrix of left singular vectors, and P is an orthogonal matrix of ++* right singular vectors. If left singular vectors are requested, this ++* subroutine actually returns U*Q instead of Q, and, if right singular ++* vectors are requested, this subroutine returns P**T*VT instead of ++* P**T, for given real input matrices U and VT. When U and VT are the ++* orthogonal matrices that reduce a general matrix A to bidiagonal ++* form: A = U*B*VT, as computed by DGEBRD, then + * +-* The routine computes S, and optionally computes U * Q, P' * VT, +-* or Q' * C, for given real input matrices U, VT, and C. ++* A = (U*Q) * S * (P**T*VT) ++* ++* is the SVD of A. Optionally, the subroutine may also compute Q**T*C ++* for a given real input matrix C. + * + * See "Computing Small Singular Values of Bidiagonal Matrices With + * Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, +@@ -69,18 +81,17 @@ + * order. + * + * E (input/output) DOUBLE PRECISION array, dimension (N) +-* On entry, the elements of E contain the +-* offdiagonal elements of the bidiagonal matrix whose SVD +-* is desired. On normal exit (INFO = 0), E is destroyed. +-* If the algorithm does not converge (INFO > 0), D and E ++* On entry, the N-1 offdiagonal elements of the bidiagonal ++* matrix B. ++* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E + * will contain the diagonal and superdiagonal elements of a + * bidiagonal matrix orthogonally equivalent to the one given + * as input. E(N) is used for workspace. + * + * VT (input/output) DOUBLE PRECISION array, dimension (LDVT, NCVT) + * On entry, an N-by-NCVT matrix VT. +-* On exit, VT is overwritten by P' * VT. +-* VT is not referenced if NCVT = 0. ++* On exit, VT is overwritten by P**T * VT. ++* Not referenced if NCVT = 0. + * + * LDVT (input) INTEGER + * The leading dimension of the array VT. 
+@@ -89,21 +100,22 @@ + * U (input/output) DOUBLE PRECISION array, dimension (LDU, N) + * On entry, an NRU-by-N matrix U. + * On exit, U is overwritten by U * Q. +-* U is not referenced if NRU = 0. ++* Not referenced if NRU = 0. + * + * LDU (input) INTEGER + * The leading dimension of the array U. LDU >= max(1,NRU). + * + * C (input/output) DOUBLE PRECISION array, dimension (LDC, NCC) + * On entry, an N-by-NCC matrix C. +-* On exit, C is overwritten by Q' * C. +-* C is not referenced if NCC = 0. ++* On exit, C is overwritten by Q**T * C. ++* Not referenced if NCC = 0. + * + * LDC (input) INTEGER + * The leading dimension of the array C. + * LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. + * +-* WORK (workspace) DOUBLE PRECISION array, dimension (4*N) ++* WORK (workspace) DOUBLE PRECISION array, dimension (2*N) ++* if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise + * + * INFO (output) INTEGER + * = 0: successful exit +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/dgghrd.f LAPACK/TIMING/EIG/EIGSRC/dgghrd.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/dgghrd.f Thu Nov 4 14:28:31 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/dgghrd.f Fri May 25 16:20:14 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ +@@ -33,16 +33,32 @@ + * + * DGGHRD reduces a pair of real matrices (A,B) to generalized upper + * Hessenberg form using orthogonal transformations, where A is a +-* general matrix and B is upper triangular: Q' * A * Z = H and +-* Q' * B * Z = T, where H is upper Hessenberg, T is upper triangular, +-* and Q and Z are orthogonal, and ' means transpose. ++* general matrix and B is upper triangular. 
The form of the ++* generalized eigenvalue problem is ++* A*x = lambda*B*x, ++* and B is typically made upper triangular by computing its QR ++* factorization and moving the orthogonal matrix Q to the left side ++* of the equation. ++* ++* This subroutine simultaneously reduces A to a Hessenberg matrix H: ++* Q**T*A*Z = H ++* and transforms B to another upper triangular matrix T: ++* Q**T*B*Z = T ++* in order to reduce the problem to its standard form ++* H*y = lambda*T*y ++* where y = Z**T*x. + * + * The orthogonal matrices Q and Z are determined as products of Givens + * rotations. They may either be formed explicitly, or they may be + * postmultiplied into input matrices Q1 and Z1, so that + * +-* Q1 * A * Z1' = (Q1*Q) * H * (Z1*Z)' +-* Q1 * B * Z1' = (Q1*Q) * T * (Z1*Z)' ++* Q1 * A * Z1**T = (Q1*Q) * H * (Z1*Z)**T ++* ++* Q1 * B * Z1**T = (Q1*Q) * T * (Z1*Z)**T ++* ++* If Q1 is the orthogonal matrix from the QR factorization of B in the ++* original equation A*x = lambda*B*x, then DGGHRD reduces the original ++* problem to generalized Hessenberg form. + * + * Arguments + * ========= +@@ -66,10 +82,11 @@ + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set +-* by a previous call to DGGBAL; otherwise they should be set +-* to 1 and N respectively. ++* ILO and IHI mark the rows and columns of A which are to be ++* reduced. It is assumed that A is already upper triangular ++* in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are ++* normally set by a previous call to SGGBAL; otherwise they ++* should be set to 1 and N respectively. + * 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. + * + * A (input/output) DOUBLE PRECISION array, dimension (LDA, N) +@@ -83,33 +100,28 @@ + * + * B (input/output) DOUBLE PRECISION array, dimension (LDB, N) + * On entry, the N-by-N upper triangular matrix B. 
+-* On exit, the upper triangular matrix T = Q' B Z. The ++* On exit, the upper triangular matrix T = Q**T B Z. The + * elements below the diagonal are set to zero. + * + * LDB (input) INTEGER + * The leading dimension of the array B. LDB >= max(1,N). + * + * Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) +-* If COMPQ='N': Q is not referenced. +-* If COMPQ='I': on entry, Q need not be set, and on exit it +-* contains the orthogonal matrix Q, where Q' +-* is the product of the Givens transformations +-* which are applied to A and B on the left. +-* If COMPQ='V': on entry, Q must contain an orthogonal matrix +-* Q1, and on exit this is overwritten by Q1*Q. ++* On entry, if COMPQ = 'V', the orthogonal matrix Q1, ++* typically from the QR factorization of B. ++* On exit, if COMPQ='I', the orthogonal matrix Q, and if ++* COMPQ = 'V', the product Q1*Q. ++* Not referenced if COMPQ='N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. + * LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. + * + * Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N) +-* If COMPZ='N': Z is not referenced. +-* If COMPZ='I': on entry, Z need not be set, and on exit it +-* contains the orthogonal matrix Z, which is +-* the product of the Givens transformations +-* which are applied to A and B on the right. +-* If COMPZ='V': on entry, Z must contain an orthogonal matrix +-* Z1, and on exit this is overwritten by Z1*Z. ++* On entry, if COMPZ = 'V', the orthogonal matrix Z1. ++* On exit, if COMPZ='I', the orthogonal matrix Z, and if ++* COMPZ = 'V', the product Z1*Z. ++* Not referenced if COMPZ='N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. 
+diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/dhgeqz.f LAPACK/TIMING/EIG/EIGSRC/dhgeqz.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/dhgeqz.f Thu Nov 4 14:28:33 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/dhgeqz.f Fri May 25 16:20:32 2001 +@@ -1,20 +1,20 @@ +- SUBROUTINE DHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, A, LDA, B, LDB, ++ SUBROUTINE DHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, H, LDH, T, LDT, + $ ALPHAR, ALPHAI, BETA, Q, LDQ, Z, LDZ, WORK, + $ LWORK, INFO ) + * + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ, JOB +- INTEGER IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, LWORK, N ++ INTEGER IHI, ILO, INFO, LDH, LDQ, LDT, LDZ, LWORK, N + * .. + * .. Array Arguments .. +- DOUBLE PRECISION A( LDA, * ), ALPHAI( * ), ALPHAR( * ), +- $ B( LDB, * ), BETA( * ), Q( LDQ, * ), WORK( * ), +- $ Z( LDZ, * ) ++ DOUBLE PRECISION ALPHAI( * ), ALPHAR( * ), BETA( * ), ++ $ H( LDH, * ), Q( LDQ, * ), T( LDT, * ), ++ $ WORK( * ), Z( LDZ, * ) + * .. + * ---------------------- Begin Timing Code ------------------------- + * Common block to return operation count and iteration count +@@ -32,37 +32,56 @@ + * Purpose + * ======= + * +-* DHGEQZ implements a single-/double-shift version of the QZ method for +-* finding the generalized eigenvalues +-* +-* w(j)=(ALPHAR(j) + i*ALPHAI(j))/BETAR(j) of the equation +-* +-* det( A - w(i) B ) = 0 +-* +-* In addition, the pair A,B may be reduced to generalized Schur form: +-* B is upper triangular, and A is block upper triangular, where the +-* diagonal blocks are either 1-by-1 or 2-by-2, the 2-by-2 blocks having +-* complex generalized eigenvalues (see the description of the argument +-* JOB.) 
+-* +-* If JOB='S', then the pair (A,B) is simultaneously reduced to Schur +-* form by applying one orthogonal tranformation (usually called Q) on +-* the left and another (usually called Z) on the right. The 2-by-2 +-* upper-triangular diagonal blocks of B corresponding to 2-by-2 blocks +-* of A will be reduced to positive diagonal matrices. (I.e., +-* if A(j+1,j) is non-zero, then B(j+1,j)=B(j,j+1)=0 and B(j,j) and +-* B(j+1,j+1) will be positive.) +-* +-* If JOB='E', then at each iteration, the same transformations +-* are computed, but they are only applied to those parts of A and B +-* which are needed to compute ALPHAR, ALPHAI, and BETAR. +-* +-* If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the orthogonal +-* transformations used to reduce (A,B) are accumulated into the arrays +-* Q and Z s.t.: +-* +-* Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)* +-* Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)* ++* DHGEQZ computes the eigenvalues of a real matrix pair (H,T), ++* where H is an upper Hessenberg matrix and T is upper triangular, ++* using the double-shift QZ method. ++* Matrix pairs of this type are produced by the reduction to ++* generalized upper Hessenberg form of a real matrix pair (A,B): ++* ++* A = Q1*H*Z1**T, B = Q1*T*Z1**T, ++* ++* as computed by DGGHRD. ++* ++* If JOB='S', then the Hessenberg-triangular pair (H,T) is ++* also reduced to generalized Schur form, ++* ++* H = Q*S*Z**T, T = Q*P*Z**T, ++* ++* where Q and Z are orthogonal matrices, P is an upper triangular ++* matrix, and S is a quasi-triangular matrix with 1-by-1 and 2-by-2 ++* diagonal blocks. ++* ++* The 1-by-1 blocks correspond to real eigenvalues of the matrix pair ++* (H,T) and the 2-by-2 blocks correspond to complex conjugate pairs of ++* eigenvalues. 
++* ++* Additionally, the 2-by-2 upper triangular diagonal blocks of P ++* corresponding to 2-by-2 blocks of S are reduced to positive diagonal ++* form, i.e., if S(j+1,j) is non-zero, then P(j+1,j) = P(j,j+1) = 0, ++* P(j,j) > 0, and P(j+1,j+1) > 0. ++* ++* Optionally, the orthogonal matrix Q from the generalized Schur ++* factorization may be postmultiplied into an input matrix Q1, and the ++* orthogonal matrix Z may be postmultiplied into an input matrix Z1. ++* If Q1 and Z1 are the orthogonal matrices from DGGHRD that reduced ++* the matrix pair (A,B) to generalized upper Hessenberg form, then the ++* output matrices Q1*Q and Z1*Z are the orthogonal factors from the ++* generalized Schur factorization of (A,B): ++* ++* A = (Q1*Q)*S*(Z1*Z)**T, B = (Q1*Q)*P*(Z1*Z)**T. ++* ++* To avoid overflow, eigenvalues of the matrix pair (H,T) (equivalently, ++* of (A,B)) are computed as a pair of values (alpha,beta), where alpha is ++* complex and beta real. ++* If beta is nonzero, lambda = alpha / beta is an eigenvalue of the ++* generalized nonsymmetric eigenvalue problem (GNEP) ++* A*x = lambda*B*x ++* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the ++* alternate form of the GNEP ++* mu*A*y = B*y. ++* Real eigenvalues can be read directly from the generalized Schur ++* form: ++* alpha = S(i,i), beta = P(i,i). + * + * Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix + * Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), +@@ -72,114 +91,98 @@ + * ========= + * + * JOB (input) CHARACTER*1 +-* = 'E': compute only ALPHAR, ALPHAI, and BETA. A and B will +-* not necessarily be put into generalized Schur form. +-* = 'S': put A and B into generalized Schur form, as well +-* as computing ALPHAR, ALPHAI, and BETA. ++* = 'E': Compute eigenvalues only; ++* = 'S': Compute eigenvalues and the Schur form. + * + * COMPQ (input) CHARACTER*1 +-* = 'N': do not modify Q. 
+-* = 'V': multiply the array Q on the right by the transpose of +-* the orthogonal tranformation that is applied to the +-* left side of A and B to reduce them to Schur form. +-* = 'I': like COMPQ='V', except that Q will be initialized to +-* the identity first. ++* = 'N': Left Schur vectors (Q) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Q ++* of left Schur vectors of (H,T) is returned; ++* = 'V': Q must contain an orthogonal matrix Q1 on entry and ++* the product Q1*Q is returned. + * + * COMPZ (input) CHARACTER*1 +-* = 'N': do not modify Z. +-* = 'V': multiply the array Z on the right by the orthogonal +-* tranformation that is applied to the right side of +-* A and B to reduce them to Schur form. +-* = 'I': like COMPZ='V', except that Z will be initialized to +-* the identity first. ++* = 'N': Right Schur vectors (Z) are not computed; ++* = 'I': Z is initialized to the unit matrix and the matrix Z ++* of right Schur vectors of (H,T) is returned; ++* = 'V': Z must contain an orthogonal matrix Z1 on entry and ++* the product Z1*Z is returned. + * + * N (input) INTEGER +-* The order of the matrices A, B, Q, and Z. N >= 0. ++* The order of the matrices H, T, Q, and Z. N >= 0. + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. +-* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. +-* +-* A (input/output) DOUBLE PRECISION array, dimension (LDA, N) +-* On entry, the N-by-N upper Hessenberg matrix A. Elements +-* below the subdiagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to generalized Schur form. +-* If JOB='E', then on exit A will have been destroyed. +-* The diagonal blocks will be correct, but the off-diagonal +-* portion will be meaningless. +-* +-* LDA (input) INTEGER +-* The leading dimension of the array A. LDA >= max( 1, N ). 
+-* +-* B (input/output) DOUBLE PRECISION array, dimension (LDB, N) +-* On entry, the N-by-N upper triangular matrix B. Elements +-* below the diagonal must be zero. 2-by-2 blocks in B +-* corresponding to 2-by-2 blocks in A will be reduced to +-* positive diagonal form. (I.e., if A(j+1,j) is non-zero, +-* then B(j+1,j)=B(j,j+1)=0 and B(j,j) and B(j+1,j+1) will be +-* positive.) +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to Schur form. +-* If JOB='E', then on exit B will have been destroyed. +-* Elements corresponding to diagonal blocks of A will be +-* correct, but the off-diagonal portion will be meaningless. ++* ILO and IHI mark the rows and columns of H which are in ++* Hessenberg form. It is assumed that A is already upper ++* triangular in rows and columns 1:ILO-1 and IHI+1:N. ++* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. ++* ++* H (input/output) DOUBLE PRECISION array, dimension (LDH, N) ++* On entry, the N-by-N upper Hessenberg matrix H. ++* On exit, if JOB = 'S', H contains the upper quasi-triangular ++* matrix S from the generalized Schur factorization; ++* 2-by-2 diagonal blocks (corresponding to complex conjugate ++* pairs of eigenvalues) are returned in standard form, with ++* H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. ++* If JOB = 'E', the diagonal blocks of H match those of S, but ++* the rest of H is unspecified. ++* ++* LDH (input) INTEGER ++* The leading dimension of the array H. LDH >= max( 1, N ). ++* ++* T (input/output) DOUBLE PRECISION array, dimension (LDT, N) ++* On entry, the N-by-N upper triangular matrix T. ++* On exit, if JOB = 'S', T contains the upper triangular ++* matrix P from the generalized Schur factorization; ++* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks of S ++* are reduced to positive diagonal form, i.e., if H(j+1,j) is ++* non-zero, then T(j+1,j) = T(j,j+1) = 0, T(j,j) > 0, and ++* T(j+1,j+1) > 0. 
++* If JOB = 'E', the diagonal blocks of T match those of P, but ++* the rest of T is unspecified. + * +-* LDB (input) INTEGER +-* The leading dimension of the array B. LDB >= max( 1, N ). ++* LDT (input) INTEGER ++* The leading dimension of the array T. LDT >= max( 1, N ). + * + * ALPHAR (output) DOUBLE PRECISION array, dimension (N) +-* ALPHAR(1:N) will be set to real parts of the diagonal +-* elements of A that would result from reducing A and B to +-* Schur form and then further reducing them both to triangular +-* form using unitary transformations s.t. the diagonal of B +-* was non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then ALPHAR(j)=A(j,j). +-* Note that the (real or complex) values +-* (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the +-* generalized eigenvalues of the matrix pencil A - wB. ++* The real parts of each scalar alpha defining an eigenvalue ++* of GNEP. + * + * ALPHAI (output) DOUBLE PRECISION array, dimension (N) +-* ALPHAI(1:N) will be set to imaginary parts of the diagonal +-* elements of A that would result from reducing A and B to +-* Schur form and then further reducing them both to triangular +-* form using unitary transformations s.t. the diagonal of B +-* was non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then ALPHAR(j)=0. +-* Note that the (real or complex) values +-* (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the +-* generalized eigenvalues of the matrix pencil A - wB. ++* The imaginary parts of each scalar alpha defining an ++* eigenvalue of GNEP. ++* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if ++* positive, then the j-th and (j+1)-st eigenvalues are a ++* complex conjugate pair, with ALPHAI(j+1) = -ALPHAI(j). 
+ * + * BETA (output) DOUBLE PRECISION array, dimension (N) +-* BETA(1:N) will be set to the (real) diagonal elements of B +-* that would result from reducing A and B to Schur form and +-* then further reducing them both to triangular form using +-* unitary transformations s.t. the diagonal of B was +-* non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then BETA(j)=B(j,j). +-* Note that the (real or complex) values +-* (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the +-* generalized eigenvalues of the matrix pencil A - wB. +-* (Note that BETA(1:N) will always be non-negative, and no +-* BETAI is necessary.) ++* The scalars beta that define the eigenvalues of GNEP. ++* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and ++* beta = BETA(j) represent the j-th eigenvalue of the matrix ++* pair (A,B), in one of the forms lambda = alpha/beta or ++* mu = beta/alpha. Since either lambda or mu may overflow, ++* they should not, in general, be computed. + * + * Q (input/output) DOUBLE PRECISION array, dimension (LDQ, N) +-* If COMPQ='N', then Q will not be referenced. +-* If COMPQ='V' or 'I', then the transpose of the orthogonal +-* transformations which are applied to A and B on the left +-* will be applied to the array Q on the right. ++* On entry, if COMPZ = 'V', the orthogonal matrix Q1 used in ++* the reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the orthogonal matrix of left Schur ++* vectors of (H,T), and if COMPZ = 'V', the orthogonal matrix ++* of left Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. LDQ >= 1. + * If COMPQ='V' or 'I', then LDQ >= N. + * + * Z (input/output) DOUBLE PRECISION array, dimension (LDZ, N) +-* If COMPZ='N', then Z will not be referenced. 
+-* If COMPZ='V' or 'I', then the orthogonal transformations +-* which are applied to A and B on the right will be applied +-* to the array Z on the right. ++* On entry, if COMPZ = 'V', the orthogonal matrix Z1 used in ++* the reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the orthogonal matrix of ++* right Schur vectors of (H,T), and if COMPZ = 'V', the ++* orthogonal matrix of right Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. LDZ >= 1. +@@ -199,13 +202,12 @@ + * INFO (output) INTEGER + * = 0: successful exit + * < 0: if INFO = -i, the i-th argument had an illegal value +-* = 1,...,N: the QZ iteration did not converge. (A,B) is not ++* = 1,...,N: the QZ iteration did not converge. (H,T) is not + * in Schur form, but ALPHAR(i), ALPHAI(i), and + * BETA(i), i=INFO+1,...,N should be correct. +-* = N+1,...,2*N: the shift calculation failed. (A,B) is not ++* = N+1,...,2*N: the shift calculation failed. (H,T) is not + * in Schur form, but ALPHAR(i), ALPHAI(i), and + * BETA(i), i=INFO-N+1,...,N should be correct. +-* > 2*N: various "impossible" errors. + * + * Further Details + * =============== +@@ -237,7 +239,7 @@ + $ BTOL, C, C11I, C11R, C12, C21, C22I, C22R, CL, + $ CQ, CR, CZ, ESHIFT, OPST, S, S1, S1INV, S2, + $ SAFMAX, SAFMIN, SCALE, SL, SQI, SQR, SR, SZI, +- $ SZR, T, TAU, TEMP, TEMP2, TEMPI, TEMPR, U1, ++ $ SZR, T1, TAU, TEMP, TEMP2, TEMPI, TEMPR, U1, + $ U12, U12L, U2, ULP, VS, W11, W12, W21, W22, + $ WABS, WI, WR, WR2 + * .. +@@ -319,9 +321,9 @@ + INFO = -5 + ELSE IF( IHI.GT.N .OR. IHI.LT.ILO-1 ) THEN + INFO = -6 +- ELSE IF( LDA.LT.N ) THEN ++ ELSE IF( LDH.LT.N ) THEN + INFO = -8 +- ELSE IF( LDB.LT.N ) THEN ++ ELSE IF( LDT.LT.N ) THEN + INFO = -10 + ELSE IF( LDQ.LT.1 .OR. ( ILQ .AND. 
LDQ.LT.N ) ) THEN + INFO = -15 +@@ -360,8 +362,8 @@ + SAFMIN = DLAMCH( 'S' ) + SAFMAX = ONE / SAFMIN + ULP = DLAMCH( 'E' )*DLAMCH( 'B' ) +- ANORM = DLANHS( 'F', IN, A( ILO, ILO ), LDA, WORK ) +- BNORM = DLANHS( 'F', IN, B( ILO, ILO ), LDB, WORK ) ++ ANORM = DLANHS( 'F', IN, H( ILO, ILO ), LDH, WORK ) ++ BNORM = DLANHS( 'F', IN, T( ILO, ILO ), LDT, WORK ) + ATOL = MAX( SAFMIN, ULP*ANORM ) + BTOL = MAX( SAFMIN, ULP*BNORM ) + ASCALE = ONE / MAX( SAFMIN, ANORM ) +@@ -370,15 +372,15 @@ + * Set Eigenvalues IHI+1:N + * + DO 30 J = IHI + 1, N +- IF( B( J, J ).LT.ZERO ) THEN ++ IF( T( J, J ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 10 JR = 1, J +- A( JR, J ) = -A( JR, J ) +- B( JR, J ) = -B( JR, J ) ++ H( JR, J ) = -H( JR, J ) ++ T( JR, J ) = -T( JR, J ) + 10 CONTINUE + ELSE +- A( J, J ) = -A( J, J ) +- B( J, J ) = -B( J, J ) ++ H( J, J ) = -H( J, J ) ++ T( J, J ) = -T( J, J ) + END IF + IF( ILZ ) THEN + DO 20 JR = 1, N +@@ -386,9 +388,9 @@ + 20 CONTINUE + END IF + END IF +- ALPHAR( J ) = A( J, J ) ++ ALPHAR( J ) = H( J, J ) + ALPHAI( J ) = ZERO +- BETA( J ) = B( J, J ) ++ BETA( J ) = T( J, J ) + 30 CONTINUE + * + * ---------------------- Begin Timing Code ------------------------- +@@ -435,8 +437,8 @@ + * Split the matrix if possible. + * + * Two tests: +-* 1: A(j,j-1)=0 or j=ILO +-* 2: B(j,j)=0 ++* 1: H(j,j-1)=0 or j=ILO ++* 2: T(j,j)=0 + * + IF( ILAST.EQ.ILO ) THEN + * +@@ -444,14 +446,14 @@ + * + GO TO 80 + ELSE +- IF( ABS( A( ILAST, ILAST-1 ) ).LE.ATOL ) THEN +- A( ILAST, ILAST-1 ) = ZERO ++ IF( ABS( H( ILAST, ILAST-1 ) ).LE.ATOL ) THEN ++ H( ILAST, ILAST-1 ) = ZERO + GO TO 80 + END IF + END IF + * +- IF( ABS( B( ILAST, ILAST ) ).LE.BTOL ) THEN +- B( ILAST, ILAST ) = ZERO ++ IF( ABS( T( ILAST, ILAST ) ).LE.BTOL ) THEN ++ T( ILAST, ILAST ) = ZERO + GO TO 70 + END IF + * +@@ -459,36 +461,36 @@ + * + DO 60 J = ILAST - 1, ILO, -1 + * +-* Test 1: for A(j,j-1)=0 or j=ILO ++* Test 1: for H(j,j-1)=0 or j=ILO + * + IF( J.EQ.ILO ) THEN + ILAZRO = .TRUE. 
+ ELSE +- IF( ABS( A( J, J-1 ) ).LE.ATOL ) THEN +- A( J, J-1 ) = ZERO ++ IF( ABS( H( J, J-1 ) ).LE.ATOL ) THEN ++ H( J, J-1 ) = ZERO + ILAZRO = .TRUE. + ELSE + ILAZRO = .FALSE. + END IF + END IF + * +-* Test 2: for B(j,j)=0 ++* Test 2: for T(j,j)=0 + * +- IF( ABS( B( J, J ) ).LT.BTOL ) THEN +- B( J, J ) = ZERO ++ IF( ABS( T( J, J ) ).LT.BTOL ) THEN ++ T( J, J ) = ZERO + * + * Test 1a: Check for 2 consecutive small subdiagonals in A + * + ILAZR2 = .FALSE. + IF( .NOT.ILAZRO ) THEN +- TEMP = ABS( A( J, J-1 ) ) +- TEMP2 = ABS( A( J, J ) ) ++ TEMP = ABS( H( J, J-1 ) ) ++ TEMP2 = ABS( H( J, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( TEMP*( ASCALE*ABS( A( J+1, J ) ) ).LE.TEMP2* ++ IF( TEMP*( ASCALE*ABS( H( J+1, J ) ) ).LE.TEMP2* + $ ( ASCALE*ATOL ) )ILAZR2 = .TRUE. + END IF + * +@@ -500,26 +502,26 @@ + * + IF( ILAZRO .OR. ILAZR2 ) THEN + DO 40 JCH = J, ILAST - 1 +- TEMP = A( JCH, JCH ) +- CALL DLARTG( TEMP, A( JCH+1, JCH ), C, S, +- $ A( JCH, JCH ) ) +- A( JCH+1, JCH ) = ZERO +- CALL DROT( ILASTM-JCH, A( JCH, JCH+1 ), LDA, +- $ A( JCH+1, JCH+1 ), LDA, C, S ) +- CALL DROT( ILASTM-JCH, B( JCH, JCH+1 ), LDB, +- $ B( JCH+1, JCH+1 ), LDB, C, S ) ++ TEMP = H( JCH, JCH ) ++ CALL DLARTG( TEMP, H( JCH+1, JCH ), C, S, ++ $ H( JCH, JCH ) ) ++ H( JCH+1, JCH ) = ZERO ++ CALL DROT( ILASTM-JCH, H( JCH, JCH+1 ), LDH, ++ $ H( JCH+1, JCH+1 ), LDH, C, S ) ++ CALL DROT( ILASTM-JCH, T( JCH, JCH+1 ), LDT, ++ $ T( JCH+1, JCH+1 ), LDT, C, S ) + IF( ILQ ) + $ CALL DROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, S ) + IF( ILAZR2 ) +- $ A( JCH, JCH-1 ) = A( JCH, JCH-1 )*C ++ $ H( JCH, JCH-1 ) = H( JCH, JCH-1 )*C + ILAZR2 = .FALSE. 
+ * + * --------------- Begin Timing Code ----------------- + OPST = OPST + DBLE( 7+12*( ILASTM-JCH )+6*NQ ) + * ---------------- End Timing Code ------------------ + * +- IF( ABS( B( JCH+1, JCH+1 ) ).GE.BTOL ) THEN ++ IF( ABS( T( JCH+1, JCH+1 ) ).GE.BTOL ) THEN + IF( JCH+1.GE.ILAST ) THEN + GO TO 80 + ELSE +@@ -527,35 +529,35 @@ + GO TO 110 + END IF + END IF +- B( JCH+1, JCH+1 ) = ZERO ++ T( JCH+1, JCH+1 ) = ZERO + 40 CONTINUE + GO TO 70 + ELSE + * +-* Only test 2 passed -- chase the zero to B(ILAST,ILAST) +-* Then process as in the case B(ILAST,ILAST)=0 ++* Only test 2 passed -- chase the zero to T(ILAST,ILAST) ++* Then process as in the case T(ILAST,ILAST)=0 + * + DO 50 JCH = J, ILAST - 1 +- TEMP = B( JCH, JCH+1 ) +- CALL DLARTG( TEMP, B( JCH+1, JCH+1 ), C, S, +- $ B( JCH, JCH+1 ) ) +- B( JCH+1, JCH+1 ) = ZERO ++ TEMP = T( JCH, JCH+1 ) ++ CALL DLARTG( TEMP, T( JCH+1, JCH+1 ), C, S, ++ $ T( JCH, JCH+1 ) ) ++ T( JCH+1, JCH+1 ) = ZERO + IF( JCH.LT.ILASTM-1 ) +- $ CALL DROT( ILASTM-JCH-1, B( JCH, JCH+2 ), LDB, +- $ B( JCH+1, JCH+2 ), LDB, C, S ) +- CALL DROT( ILASTM-JCH+2, A( JCH, JCH-1 ), LDA, +- $ A( JCH+1, JCH-1 ), LDA, C, S ) ++ $ CALL DROT( ILASTM-JCH-1, T( JCH, JCH+2 ), LDT, ++ $ T( JCH+1, JCH+2 ), LDT, C, S ) ++ CALL DROT( ILASTM-JCH+2, H( JCH, JCH-1 ), LDH, ++ $ H( JCH+1, JCH-1 ), LDH, C, S ) + IF( ILQ ) + $ CALL DROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, S ) +- TEMP = A( JCH+1, JCH ) +- CALL DLARTG( TEMP, A( JCH+1, JCH-1 ), C, S, +- $ A( JCH+1, JCH ) ) +- A( JCH+1, JCH-1 ) = ZERO +- CALL DROT( JCH+1-IFRSTM, A( IFRSTM, JCH ), 1, +- $ A( IFRSTM, JCH-1 ), 1, C, S ) +- CALL DROT( JCH-IFRSTM, B( IFRSTM, JCH ), 1, +- $ B( IFRSTM, JCH-1 ), 1, C, S ) ++ TEMP = H( JCH+1, JCH ) ++ CALL DLARTG( TEMP, H( JCH+1, JCH-1 ), C, S, ++ $ H( JCH+1, JCH ) ) ++ H( JCH+1, JCH-1 ) = ZERO ++ CALL DROT( JCH+1-IFRSTM, H( IFRSTM, JCH ), 1, ++ $ H( IFRSTM, JCH-1 ), 1, C, S ) ++ CALL DROT( JCH-IFRSTM, T( IFRSTM, JCH ), 1, ++ $ T( IFRSTM, JCH-1 ), 1, C, S ) + IF( ILZ ) + $ CALL 
DROT( N, Z( 1, JCH ), 1, Z( 1, JCH-1 ), 1, + $ C, S ) +@@ -585,18 +587,18 @@ + INFO = N + 1 + GO TO 420 + * +-* B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a ++* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a + * 1x1 block. + * + 70 CONTINUE +- TEMP = A( ILAST, ILAST ) +- CALL DLARTG( TEMP, A( ILAST, ILAST-1 ), C, S, +- $ A( ILAST, ILAST ) ) +- A( ILAST, ILAST-1 ) = ZERO +- CALL DROT( ILAST-IFRSTM, A( IFRSTM, ILAST ), 1, +- $ A( IFRSTM, ILAST-1 ), 1, C, S ) +- CALL DROT( ILAST-IFRSTM, B( IFRSTM, ILAST ), 1, +- $ B( IFRSTM, ILAST-1 ), 1, C, S ) ++ TEMP = H( ILAST, ILAST ) ++ CALL DLARTG( TEMP, H( ILAST, ILAST-1 ), C, S, ++ $ H( ILAST, ILAST ) ) ++ H( ILAST, ILAST-1 ) = ZERO ++ CALL DROT( ILAST-IFRSTM, H( IFRSTM, ILAST ), 1, ++ $ H( IFRSTM, ILAST-1 ), 1, C, S ) ++ CALL DROT( ILAST-IFRSTM, T( IFRSTM, ILAST ), 1, ++ $ T( IFRSTM, ILAST-1 ), 1, C, S ) + IF( ILZ ) + $ CALL DROT( N, Z( 1, ILAST ), 1, Z( 1, ILAST-1 ), 1, C, S ) + * +@@ -605,19 +607,19 @@ + * ---------------------- End Timing Code ------------------------ + * + * +-* A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHAR, ALPHAI, ++* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHAR, ALPHAI, + * and BETA + * + 80 CONTINUE +- IF( B( ILAST, ILAST ).LT.ZERO ) THEN ++ IF( T( ILAST, ILAST ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 90 J = IFRSTM, ILAST +- A( J, ILAST ) = -A( J, ILAST ) +- B( J, ILAST ) = -B( J, ILAST ) ++ H( J, ILAST ) = -H( J, ILAST ) ++ T( J, ILAST ) = -T( J, ILAST ) + 90 CONTINUE + ELSE +- A( ILAST, ILAST ) = -A( ILAST, ILAST ) +- B( ILAST, ILAST ) = -B( ILAST, ILAST ) ++ H( ILAST, ILAST ) = -H( ILAST, ILAST ) ++ T( ILAST, ILAST ) = -T( ILAST, ILAST ) + END IF + IF( ILZ ) THEN + DO 100 J = 1, N +@@ -625,9 +627,9 @@ + 100 CONTINUE + END IF + END IF +- ALPHAR( ILAST ) = A( ILAST, ILAST ) ++ ALPHAR( ILAST ) = H( ILAST, ILAST ) + ALPHAI( ILAST ) = ZERO +- BETA( ILAST ) = B( ILAST, ILAST ) ++ BETA( ILAST ) = T( ILAST, ILAST ) + * + * Go to next block -- exit if finished. 
+ * +@@ -660,7 +662,7 @@ + * Compute single shifts. + * + * At this point, IFIRST < ILAST, and the diagonal elements of +-* B(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in ++* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in + * magnitude) + * + IF( ( IITER / 10 )*10.EQ.IITER ) THEN +@@ -668,10 +670,10 @@ + * Exceptional shift. Chosen for no particularly good reason. + * (Single shift only.) + * +- IF( ( DBLE( MAXIT )*SAFMIN )*ABS( A( ILAST-1, ILAST ) ).LT. +- $ ABS( B( ILAST-1, ILAST-1 ) ) ) THEN +- ESHIFT = ESHIFT + A( ILAST-1, ILAST ) / +- $ B( ILAST-1, ILAST-1 ) ++ IF( ( DBLE( MAXIT )*SAFMIN )*ABS( H( ILAST-1, ILAST ) ).LT. ++ $ ABS( T( ILAST-1, ILAST-1 ) ) ) THEN ++ ESHIFT = ESHIFT + H( ILAST-1, ILAST ) / ++ $ T( ILAST-1, ILAST-1 ) + ELSE + ESHIFT = ESHIFT + ONE / ( SAFMIN*DBLE( MAXIT ) ) + END IF +@@ -688,8 +690,8 @@ + * bottom-right 2x2 block of A and B. The first eigenvalue + * returned by DLAG2 is the Wilkinson shift (AEP p.512), + * +- CALL DLAG2( A( ILAST-1, ILAST-1 ), LDA, +- $ B( ILAST-1, ILAST-1 ), LDB, SAFMIN*SAFETY, S1, ++ CALL DLAG2( H( ILAST-1, ILAST-1 ), LDH, ++ $ T( ILAST-1, ILAST-1 ), LDT, SAFMIN*SAFETY, S1, + $ S2, WR, WR2, WI ) + * + TEMP = MAX( S1, SAFMIN*MAX( ONE, ABS( WR ), ABS( WI ) ) ) +@@ -721,14 +723,14 @@ + * + DO 120 J = ILAST - 1, IFIRST + 1, -1 + ISTART = J +- TEMP = ABS( S1*A( J, J-1 ) ) +- TEMP2 = ABS( S1*A( J, J )-WR*B( J, J ) ) ++ TEMP = ABS( S1*H( J, J-1 ) ) ++ TEMP2 = ABS( S1*H( J, J )-WR*T( J, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. 
TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( ABS( ( ASCALE*A( J+1, J ) )*TEMP ).LE.( ASCALE*ATOL )* ++ IF( ABS( ( ASCALE*H( J+1, J ) )*TEMP ).LE.( ASCALE*ATOL )* + $ TEMP2 )GO TO 130 + 120 CONTINUE + * +@@ -739,26 +741,26 @@ + * + * Initial Q + * +- TEMP = S1*A( ISTART, ISTART ) - WR*B( ISTART, ISTART ) +- TEMP2 = S1*A( ISTART+1, ISTART ) ++ TEMP = S1*H( ISTART, ISTART ) - WR*T( ISTART, ISTART ) ++ TEMP2 = S1*H( ISTART+1, ISTART ) + CALL DLARTG( TEMP, TEMP2, C, S, TEMPR ) + * + * Sweep + * + DO 190 J = ISTART, ILAST - 1 + IF( J.GT.ISTART ) THEN +- TEMP = A( J, J-1 ) +- CALL DLARTG( TEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = ZERO ++ TEMP = H( J, J-1 ) ++ CALL DLARTG( TEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = ZERO + END IF + * + DO 140 JC = J, ILASTM +- TEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -S*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = TEMP +- TEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, JC ) = -S*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = TEMP2 ++ TEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -S*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = TEMP ++ TEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -S*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = TEMP2 + 140 CONTINUE + IF( ILQ ) THEN + DO 150 JR = 1, N +@@ -768,19 +770,19 @@ + 150 CONTINUE + END IF + * +- TEMP = B( J+1, J+1 ) +- CALL DLARTG( TEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = ZERO ++ TEMP = T( J+1, J+1 ) ++ CALL DLARTG( TEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = ZERO + * + DO 160 JR = IFRSTM, MIN( J+2, ILAST ) +- TEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -S*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = TEMP ++ TEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -S*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = TEMP + 160 CONTINUE + DO 170 JR = IFRSTM, J +- TEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -S*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = TEMP ++ 
TEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -S*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = TEMP + 170 CONTINUE + IF( ILZ ) THEN + DO 180 JR = 1, N +@@ -816,8 +818,8 @@ + * B = ( ) with B11 non-negative. + * ( 0 B22 ) + * +- CALL DLASV2( B( ILAST-1, ILAST-1 ), B( ILAST-1, ILAST ), +- $ B( ILAST, ILAST ), B22, B11, SR, CR, SL, CL ) ++ CALL DLASV2( T( ILAST-1, ILAST-1 ), T( ILAST-1, ILAST ), ++ $ T( ILAST, ILAST ), B22, B11, SR, CR, SL, CL ) + * + IF( B11.LT.ZERO ) THEN + CR = -CR +@@ -826,17 +828,17 @@ + B22 = -B22 + END IF + * +- CALL DROT( ILASTM+1-IFIRST, A( ILAST-1, ILAST-1 ), LDA, +- $ A( ILAST, ILAST-1 ), LDA, CL, SL ) +- CALL DROT( ILAST+1-IFRSTM, A( IFRSTM, ILAST-1 ), 1, +- $ A( IFRSTM, ILAST ), 1, CR, SR ) ++ CALL DROT( ILASTM+1-IFIRST, H( ILAST-1, ILAST-1 ), LDH, ++ $ H( ILAST, ILAST-1 ), LDH, CL, SL ) ++ CALL DROT( ILAST+1-IFRSTM, H( IFRSTM, ILAST-1 ), 1, ++ $ H( IFRSTM, ILAST ), 1, CR, SR ) + * + IF( ILAST.LT.ILASTM ) +- $ CALL DROT( ILASTM-ILAST, B( ILAST-1, ILAST+1 ), LDB, +- $ B( ILAST, ILAST+1 ), LDA, CL, SL ) ++ $ CALL DROT( ILASTM-ILAST, T( ILAST-1, ILAST+1 ), LDT, ++ $ T( ILAST, ILAST+1 ), LDH, CL, SL ) + IF( IFRSTM.LT.ILAST-1 ) +- $ CALL DROT( IFIRST-IFRSTM, B( IFRSTM, ILAST-1 ), 1, +- $ B( IFRSTM, ILAST ), 1, CR, SR ) ++ $ CALL DROT( IFIRST-IFRSTM, T( IFRSTM, ILAST-1 ), 1, ++ $ T( IFRSTM, ILAST ), 1, CR, SR ) + * + IF( ILQ ) + $ CALL DROT( N, Q( 1, ILAST-1 ), 1, Q( 1, ILAST ), 1, CL, +@@ -845,17 +847,17 @@ + $ CALL DROT( N, Z( 1, ILAST-1 ), 1, Z( 1, ILAST ), 1, CR, + $ SR ) + * +- B( ILAST-1, ILAST-1 ) = B11 +- B( ILAST-1, ILAST ) = ZERO +- B( ILAST, ILAST-1 ) = ZERO +- B( ILAST, ILAST ) = B22 ++ T( ILAST-1, ILAST-1 ) = B11 ++ T( ILAST-1, ILAST ) = ZERO ++ T( ILAST, ILAST-1 ) = ZERO ++ T( ILAST, ILAST ) = B22 + * + * If B22 is negative, negate column ILAST + * + IF( B22.LT.ZERO ) THEN + DO 210 J = IFRSTM, ILAST +- A( J, ILAST ) = -A( J, ILAST ) +- B( J, ILAST ) = -B( J, ILAST ) ++ H( J, ILAST ) = -H( J, ILAST ) ++ T( J, ILAST ) = -T( 
J, ILAST ) + 210 CONTINUE + * + IF( ILZ ) THEN +@@ -869,8 +871,8 @@ + * + * Recompute shift + * +- CALL DLAG2( A( ILAST-1, ILAST-1 ), LDA, +- $ B( ILAST-1, ILAST-1 ), LDB, SAFMIN*SAFETY, S1, ++ CALL DLAG2( H( ILAST-1, ILAST-1 ), LDH, ++ $ T( ILAST-1, ILAST-1 ), LDT, SAFMIN*SAFETY, S1, + $ TEMP, WR, TEMP2, WI ) + * + * ------------------- Begin Timing Code ---------------------- +@@ -887,10 +889,10 @@ + * + * Do EISPACK (QZVAL) computation of alpha and beta + * +- A11 = A( ILAST-1, ILAST-1 ) +- A21 = A( ILAST, ILAST-1 ) +- A12 = A( ILAST-1, ILAST ) +- A22 = A( ILAST, ILAST ) ++ A11 = H( ILAST-1, ILAST-1 ) ++ A21 = H( ILAST, ILAST-1 ) ++ A12 = H( ILAST-1, ILAST ) ++ A22 = H( ILAST, ILAST ) + * + * Compute complex Givens rotation on right + * (Assume some element of C = (sA - wB) > unfl ) +@@ -907,10 +909,10 @@ + * + IF( ABS( C11R )+ABS( C11I )+ABS( C12 ).GT.ABS( C21 )+ + $ ABS( C22R )+ABS( C22I ) ) THEN +- T = DLAPY3( C12, C11R, C11I ) +- CZ = C12 / T +- SZR = -C11R / T +- SZI = -C11I / T ++ T1 = DLAPY3( C12, C11R, C11I ) ++ CZ = C12 / T1 ++ SZR = -C11R / T1 ++ SZI = -C11I / T1 + ELSE + CZ = DLAPY2( C22R, C22I ) + IF( CZ.LE.SAFMIN ) THEN +@@ -920,10 +922,10 @@ + ELSE + TEMPR = C22R / CZ + TEMPI = C22I / CZ +- T = DLAPY2( CZ, C21 ) +- CZ = CZ / T +- SZR = -C21*TEMPR / T +- SZI = C21*TEMPI / T ++ T1 = DLAPY2( CZ, C21 ) ++ CZ = CZ / T1 ++ SZR = -C21*TEMPR / T1 ++ SZI = C21*TEMPI / T1 + END IF + END IF + * +@@ -957,10 +959,10 @@ + SQI = TEMPI*A2R - TEMPR*A2I + END IF + END IF +- T = DLAPY3( CQ, SQR, SQI ) +- CQ = CQ / T +- SQR = SQR / T +- SQI = SQI / T ++ T1 = DLAPY3( CQ, SQR, SQI ) ++ CQ = CQ / T1 ++ SQR = SQR / T1 ++ SQI = SQI / T1 + * + * Compute diagonal elements of QBZ + * +@@ -1016,26 +1018,26 @@ + * + * We assume that the block is at least 3x3 + * +- AD11 = ( ASCALE*A( ILAST-1, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD21 = ( ASCALE*A( ILAST, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD12 = ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( 
BSCALE*B( ILAST, ILAST ) ) +- AD22 = ( ASCALE*A( ILAST, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- U12 = B( ILAST-1, ILAST ) / B( ILAST, ILAST ) +- AD11L = ( ASCALE*A( IFIRST, IFIRST ) ) / +- $ ( BSCALE*B( IFIRST, IFIRST ) ) +- AD21L = ( ASCALE*A( IFIRST+1, IFIRST ) ) / +- $ ( BSCALE*B( IFIRST, IFIRST ) ) +- AD12L = ( ASCALE*A( IFIRST, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- AD22L = ( ASCALE*A( IFIRST+1, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- AD32L = ( ASCALE*A( IFIRST+2, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- U12L = B( IFIRST, IFIRST+1 ) / B( IFIRST+1, IFIRST+1 ) ++ AD11 = ( ASCALE*H( ILAST-1, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD21 = ( ASCALE*H( ILAST, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD12 = ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD22 = ( ASCALE*H( ILAST, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ U12 = T( ILAST-1, ILAST ) / T( ILAST, ILAST ) ++ AD11L = ( ASCALE*H( IFIRST, IFIRST ) ) / ++ $ ( BSCALE*T( IFIRST, IFIRST ) ) ++ AD21L = ( ASCALE*H( IFIRST+1, IFIRST ) ) / ++ $ ( BSCALE*T( IFIRST, IFIRST ) ) ++ AD12L = ( ASCALE*H( IFIRST, IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ AD22L = ( ASCALE*H( IFIRST+1, IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ AD32L = ( ASCALE*H( IFIRST+2, IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ U12L = T( IFIRST, IFIRST+1 ) / T( IFIRST+1, IFIRST+1 ) + * + V( 1 ) = ( AD11-AD11L )*( AD22-AD11L ) - AD12*AD21 + + $ AD21*U12*AD11L + ( AD12L-AD11L*U12L )*AD21L +@@ -1057,27 +1059,27 @@ + * Zero (j-1)st column of A + * + IF( J.GT.ISTART ) THEN +- V( 1 ) = A( J, J-1 ) +- V( 2 ) = A( J+1, J-1 ) +- V( 3 ) = A( J+2, J-1 ) ++ V( 1 ) = H( J, J-1 ) ++ V( 2 ) = H( J+1, J-1 ) ++ V( 3 ) = H( J+2, J-1 ) + * +- CALL DLARFG( 3, A( J, J-1 ), V( 2 ), 1, TAU ) ++ CALL DLARFG( 3, H( J, J-1 ), V( 2 ), 1, TAU ) + V( 1 ) = ONE +- A( J+1, J-1 ) = ZERO +- A( J+2, J-1 ) = ZERO ++ H( J+1, 
J-1 ) = ZERO ++ H( J+2, J-1 ) = ZERO + END IF + * + DO 230 JC = J, ILASTM +- TEMP = TAU*( A( J, JC )+V( 2 )*A( J+1, JC )+V( 3 )* +- $ A( J+2, JC ) ) +- A( J, JC ) = A( J, JC ) - TEMP +- A( J+1, JC ) = A( J+1, JC ) - TEMP*V( 2 ) +- A( J+2, JC ) = A( J+2, JC ) - TEMP*V( 3 ) +- TEMP2 = TAU*( B( J, JC )+V( 2 )*B( J+1, JC )+V( 3 )* +- $ B( J+2, JC ) ) +- B( J, JC ) = B( J, JC ) - TEMP2 +- B( J+1, JC ) = B( J+1, JC ) - TEMP2*V( 2 ) +- B( J+2, JC ) = B( J+2, JC ) - TEMP2*V( 3 ) ++ TEMP = TAU*( H( J, JC )+V( 2 )*H( J+1, JC )+V( 3 )* ++ $ H( J+2, JC ) ) ++ H( J, JC ) = H( J, JC ) - TEMP ++ H( J+1, JC ) = H( J+1, JC ) - TEMP*V( 2 ) ++ H( J+2, JC ) = H( J+2, JC ) - TEMP*V( 3 ) ++ TEMP2 = TAU*( T( J, JC )+V( 2 )*T( J+1, JC )+V( 3 )* ++ $ T( J+2, JC ) ) ++ T( J, JC ) = T( J, JC ) - TEMP2 ++ T( J+1, JC ) = T( J+1, JC ) - TEMP2*V( 2 ) ++ T( J+2, JC ) = T( J+2, JC ) - TEMP2*V( 3 ) + 230 CONTINUE + IF( ILQ ) THEN + DO 240 JR = 1, N +@@ -1094,27 +1096,27 @@ + * Swap rows to pivot + * + ILPIVT = .FALSE. +- TEMP = MAX( ABS( B( J+1, J+1 ) ), ABS( B( J+1, J+2 ) ) ) +- TEMP2 = MAX( ABS( B( J+2, J+1 ) ), ABS( B( J+2, J+2 ) ) ) ++ TEMP = MAX( ABS( T( J+1, J+1 ) ), ABS( T( J+1, J+2 ) ) ) ++ TEMP2 = MAX( ABS( T( J+2, J+1 ) ), ABS( T( J+2, J+2 ) ) ) + IF( MAX( TEMP, TEMP2 ).LT.SAFMIN ) THEN + SCALE = ZERO + U1 = ONE + U2 = ZERO + GO TO 250 + ELSE IF( TEMP.GE.TEMP2 ) THEN +- W11 = B( J+1, J+1 ) +- W21 = B( J+2, J+1 ) +- W12 = B( J+1, J+2 ) +- W22 = B( J+2, J+2 ) +- U1 = B( J+1, J ) +- U2 = B( J+2, J ) ++ W11 = T( J+1, J+1 ) ++ W21 = T( J+2, J+1 ) ++ W12 = T( J+1, J+2 ) ++ W22 = T( J+2, J+2 ) ++ U1 = T( J+1, J ) ++ U2 = T( J+2, J ) + ELSE +- W21 = B( J+1, J+1 ) +- W11 = B( J+2, J+1 ) +- W22 = B( J+1, J+2 ) +- W12 = B( J+2, J+2 ) +- U2 = B( J+1, J ) +- U1 = B( J+2, J ) ++ W21 = T( J+1, J+1 ) ++ W11 = T( J+2, J+1 ) ++ W22 = T( J+1, J+2 ) ++ W12 = T( J+2, J+2 ) ++ U2 = T( J+1, J ) ++ U1 = T( J+2, J ) + END IF + * + * Swap columns if nec. 
+@@ -1164,9 +1166,9 @@ + * + * Compute Householder Vector + * +- T = SQRT( SCALE**2+U1**2+U2**2 ) +- TAU = ONE + SCALE / T +- VS = -ONE / ( SCALE+T ) ++ T1 = SQRT( SCALE**2+U1**2+U2**2 ) ++ TAU = ONE + SCALE / T1 ++ VS = -ONE / ( SCALE+T1 ) + V( 1 ) = ONE + V( 2 ) = VS*U1 + V( 3 ) = VS*U2 +@@ -1174,18 +1176,18 @@ + * Apply transformations from the right. + * + DO 260 JR = IFRSTM, MIN( J+3, ILAST ) +- TEMP = TAU*( A( JR, J )+V( 2 )*A( JR, J+1 )+V( 3 )* +- $ A( JR, J+2 ) ) +- A( JR, J ) = A( JR, J ) - TEMP +- A( JR, J+1 ) = A( JR, J+1 ) - TEMP*V( 2 ) +- A( JR, J+2 ) = A( JR, J+2 ) - TEMP*V( 3 ) ++ TEMP = TAU*( H( JR, J )+V( 2 )*H( JR, J+1 )+V( 3 )* ++ $ H( JR, J+2 ) ) ++ H( JR, J ) = H( JR, J ) - TEMP ++ H( JR, J+1 ) = H( JR, J+1 ) - TEMP*V( 2 ) ++ H( JR, J+2 ) = H( JR, J+2 ) - TEMP*V( 3 ) + 260 CONTINUE + DO 270 JR = IFRSTM, J + 2 +- TEMP = TAU*( B( JR, J )+V( 2 )*B( JR, J+1 )+V( 3 )* +- $ B( JR, J+2 ) ) +- B( JR, J ) = B( JR, J ) - TEMP +- B( JR, J+1 ) = B( JR, J+1 ) - TEMP*V( 2 ) +- B( JR, J+2 ) = B( JR, J+2 ) - TEMP*V( 3 ) ++ TEMP = TAU*( T( JR, J )+V( 2 )*T( JR, J+1 )+V( 3 )* ++ $ T( JR, J+2 ) ) ++ T( JR, J ) = T( JR, J ) - TEMP ++ T( JR, J+1 ) = T( JR, J+1 ) - TEMP*V( 2 ) ++ T( JR, J+2 ) = T( JR, J+2 ) - TEMP*V( 3 ) + 270 CONTINUE + IF( ILZ ) THEN + DO 280 JR = 1, N +@@ -1196,8 +1198,8 @@ + Z( JR, J+2 ) = Z( JR, J+2 ) - TEMP*V( 3 ) + 280 CONTINUE + END IF +- B( J+1, J ) = ZERO +- B( J+2, J ) = ZERO ++ T( J+1, J ) = ZERO ++ T( J+2, J ) = ZERO + 290 CONTINUE + * + * Last elements: Use Givens rotations +@@ -1205,17 +1207,17 @@ + * Rotations from the left + * + J = ILAST - 1 +- TEMP = A( J, J-1 ) +- CALL DLARTG( TEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = ZERO ++ TEMP = H( J, J-1 ) ++ CALL DLARTG( TEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = ZERO + * + DO 300 JC = J, ILASTM +- TEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -S*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = TEMP +- TEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, 
JC ) = -S*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = TEMP2 ++ TEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -S*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = TEMP ++ TEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -S*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = TEMP2 + 300 CONTINUE + IF( ILQ ) THEN + DO 310 JR = 1, N +@@ -1227,19 +1229,19 @@ + * + * Rotations from the right. + * +- TEMP = B( J+1, J+1 ) +- CALL DLARTG( TEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = ZERO ++ TEMP = T( J+1, J+1 ) ++ CALL DLARTG( TEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = ZERO + * + DO 320 JR = IFRSTM, ILAST +- TEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -S*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = TEMP ++ TEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -S*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = TEMP + 320 CONTINUE + DO 330 JR = IFRSTM, ILAST - 1 +- TEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -S*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = TEMP ++ TEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -S*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = TEMP + 330 CONTINUE + IF( ILZ ) THEN + DO 340 JR = 1, N +@@ -1290,15 +1292,15 @@ + * Set Eigenvalues 1:ILO-1 + * + DO 410 J = 1, ILO - 1 +- IF( B( J, J ).LT.ZERO ) THEN ++ IF( T( J, J ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 390 JR = 1, J +- A( JR, J ) = -A( JR, J ) +- B( JR, J ) = -B( JR, J ) ++ H( JR, J ) = -H( JR, J ) ++ T( JR, J ) = -T( JR, J ) + 390 CONTINUE + ELSE +- A( J, J ) = -A( J, J ) +- B( J, J ) = -B( J, J ) ++ H( J, J ) = -H( J, J ) ++ T( J, J ) = -T( J, J ) + END IF + IF( ILZ ) THEN + DO 400 JR = 1, N +@@ -1306,9 +1308,9 @@ + 400 CONTINUE + END IF + END IF +- ALPHAR( J ) = A( J, J ) ++ ALPHAR( J ) = H( J, J ) + ALPHAI( J ) = ZERO +- BETA( J ) = B( J, J ) ++ BETA( J ) = T( J, J ) + 410 CONTINUE + * + * Normal Termination +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/dtgevc.f LAPACK/TIMING/EIG/EIGSRC/dtgevc.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/dtgevc.f Thu Nov 4 
14:28:32 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/dtgevc.f Fri May 25 16:20:45 2001 +@@ -1,18 +1,18 @@ +- SUBROUTINE DTGEVC( SIDE, HOWMNY, SELECT, N, A, LDA, B, LDB, VL, ++ SUBROUTINE DTGEVC( SIDE, HOWMNY, SELECT, N, S, LDS, P, LDP, VL, + $ LDVL, VR, LDVR, MM, M, WORK, INFO ) + * + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 4, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +- INTEGER INFO, LDA, LDB, LDVL, LDVR, M, MM, N ++ INTEGER INFO, LDP, LDS, LDVL, LDVR, M, MM, N + * .. + * .. Array Arguments .. + LOGICAL SELECT( * ) +- DOUBLE PRECISION A( LDA, * ), B( LDB, * ), VL( LDVL, * ), ++ DOUBLE PRECISION P( LDP, * ), S( LDS, * ), VL( LDVL, * ), + $ VR( LDVR, * ), WORK( * ) + * .. + * +@@ -33,35 +33,31 @@ + * Purpose + * ======= + * +-* DTGEVC computes some or all of the right and/or left generalized +-* eigenvectors of a pair of real upper triangular matrices (A,B). +-* +-* The right generalized eigenvector x and the left generalized +-* eigenvector y of (A,B) corresponding to a generalized eigenvalue +-* w are defined by: +-* +-* (A - wB) * x = 0 and y**H * (A - wB) = 0 +-* ++* DTGEVC computes some or all of the right and/or left eigenvectors of ++* a pair of real matrices (S,P), where S is a quasi-triangular matrix ++* and P is upper triangular. Matrix pairs of this type are produced by ++* the generalized Schur factorization of a matrix pair (A,B): ++* ++* A = Q*S*Z**T, B = Q*P*Z**T ++* ++* as computed by DGGHRD + DHGEQZ. ++* ++* The right eigenvector x and the left eigenvector y of (S,P) ++* corresponding to an eigenvalue w are defined by: ++* ++* S*x = w*P*x, (y**H)*S = w*(y**H)*P, ++* + * where y**H denotes the conjugate tranpose of y. 
+-* +-* If an eigenvalue w is determined by zero diagonal elements of both A +-* and B, a unit vector is returned as the corresponding eigenvector. +-* +-* If all eigenvectors are requested, the routine may either return +-* the matrices X and/or Y of right or left eigenvectors of (A,B), or +-* the products Z*X and/or Q*Y, where Z and Q are input orthogonal +-* matrices. If (A,B) was obtained from the generalized real-Schur +-* factorization of an original pair of matrices +-* (A0,B0) = (Q*A*Z**H,Q*B*Z**H), +-* then Z*X and Q*Y are the matrices of right or left eigenvectors of +-* A. +-* +-* A must be block upper triangular, with 1-by-1 and 2-by-2 diagonal +-* blocks. Corresponding to each 2-by-2 diagonal block is a complex +-* conjugate pair of eigenvalues and eigenvectors; only one +-* eigenvector of the pair is computed, namely the one corresponding +-* to the eigenvalue with positive imaginary part. +-* ++* The eigenvalues are not input to this routine, but are computed ++* directly from the diagonal blocks of S and P. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of (S,P), or the products Z*X and/or Q*Y, ++* where Z and Q are input matrices. ++* If Q and Z are the orthogonal factors from the generalized Schur ++* factorization of a matrix pair (A,B), then Z*X and Q*Y ++* are the matrices of right and left eigenvectors of (A,B). ++* + * Arguments + * ========= + * +@@ -72,78 +68,84 @@ + * + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; +-* = 'B': compute all right and/or left eigenvectors, and +-* backtransform them using the input matrices supplied +-* in VR and/or VL; ++* = 'B': compute all right and/or left eigenvectors, ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, + * specified by the logical array SELECT. 
+ * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY='S', SELECT specifies the eigenvectors to be +-* computed. +-* If HOWMNY='A' or 'B', SELECT is not referenced. +-* To select the real eigenvector corresponding to the real +-* eigenvalue w(j), SELECT(j) must be set to .TRUE. To select +-* the complex eigenvector corresponding to a complex conjugate +-* pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must +-* be set to .TRUE.. ++* computed. If w(j) is a real eigenvalue, the corresponding ++* real eigenvector is computed if SELECT(j) is .TRUE.. ++* If w(j) and w(j+1) are the real and imaginary parts of a ++* complex eigenvalue, the corresponding complex eigenvector ++* is computed if either SELECT(j) or SELECT(j+1) is .TRUE., ++* and on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is ++* set to .FALSE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER +-* The order of the matrices A and B. N >= 0. ++* The order of the matrices S and P. N >= 0. + * +-* A (input) DOUBLE PRECISION array, dimension (LDA,N) +-* The upper quasi-triangular matrix A. ++* S (input) DOUBLE PRECISION array, dimension (LDS,N) ++* The upper quasi-triangular matrix S from a generalized Schur ++* factorization, as computed by DHGEQZ. ++* ++* LDS (input) INTEGER ++* The leading dimension of array S. LDS >= max(1,N). ++* ++* P (input) DOUBLE PRECISION array, dimension (LDP,N) ++* The upper triangular matrix P from a generalized Schur ++* factorization, as computed by DHGEQZ. ++* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks ++* of S must be in positive diagonal form. + * +-* LDA (input) INTEGER +-* The leading dimension of array A. LDA >= max(1,N). +-* +-* B (input) DOUBLE PRECISION array, dimension (LDB,N) +-* The upper triangular matrix B. If A has a 2-by-2 diagonal +-* block, then the corresponding 2-by-2 block of B must be +-* diagonal with positive elements. +-* +-* LDB (input) INTEGER +-* The leading dimension of array B. LDB >= max(1,N). 
++* LDP (input) INTEGER ++* The leading dimension of array P. LDP >= max(1,N). + * + * VL (input/output) DOUBLE PRECISION array, dimension (LDVL,MM) + * On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must + * contain an N-by-N matrix Q (usually the orthogonal matrix Q + * of left Schur vectors returned by DHGEQZ). + * On exit, if SIDE = 'L' or 'B', VL contains: +-* if HOWMNY = 'A', the matrix Y of left eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Q*Y; +-* if HOWMNY = 'S', the left eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the left eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VL, in the same order as their eigenvalues. +-* If SIDE = 'R', VL is not referenced. + * + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part, and the second the imaginary part. + * ++* Not referenced if SIDE = 'R'. ++* + * LDVL (input) INTEGER +-* The leading dimension of array VL. +-* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. + * + * VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +-* contain an N-by-N matrix Q (usually the orthogonal matrix Z ++* contain an N-by-N matrix Z (usually the orthogonal matrix Z + * of right Schur vectors returned by DHGEQZ). ++* + * On exit, if SIDE = 'R' or 'B', VR contains: +-* if HOWMNY = 'A', the matrix X of right eigenvectors of (A,B); +-* if HOWMNY = 'B', the matrix Z*X; +-* if HOWMNY = 'S', the right eigenvectors of (A,B) specified by +-* SELECT, stored consecutively in the columns of +-* VR, in the same order as their eigenvalues. +-* If SIDE = 'L', VR is not referenced. 
++* if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); ++* if HOWMNY = 'B' or 'b', the matrix Z*X; ++* if HOWMNY = 'S' or 's', the right eigenvectors of (S,P) ++* specified by SELECT, stored consecutively in the ++* columns of VR, in the same order as their ++* eigenvalues. + * + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part and the second the imaginary part. ++* ++* Not referenced if SIDE = 'L'. + * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. +-* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +@@ -212,7 +214,7 @@ + * partial sums. Since FORTRAN arrays are stored columnwise, this has + * the advantage that at each step, the elements of C that are accessed + * are adjacent to one another, whereas with the rowwise method, the +-* elements accessed at a step are spaced LDA (and LDB) words apart. ++* elements accessed at a step are spaced LDS (and LDP) words apart. + * + * When finding left eigenvectors, the matrix in question is the + * transpose of the one in storage, so the rowwise method then +@@ -239,8 +241,8 @@ + $ TEMP2R, ULP, XMAX, XSCALE + * .. + * .. Local Arrays .. +- DOUBLE PRECISION BDIAG( 2 ), SUM( 2, 2 ), SUMA( 2, 2 ), +- $ SUMB( 2, 2 ) ++ DOUBLE PRECISION BDIAG( 2 ), SUM( 2, 2 ), SUMS( 2, 2 ), ++ $ SUMP( 2, 2 ) + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -265,7 +267,7 @@ + IHWMNY = 2 + ILALL = .FALSE. + ILBACK = .FALSE. +- ELSE IF( LSAME( HOWMNY, 'B' ) .OR. LSAME( HOWMNY, 'T' ) ) THEN ++ ELSE IF( LSAME( HOWMNY, 'B' ) ) THEN + IHWMNY = 3 + ILALL = .TRUE. + ILBACK = .TRUE. 
+@@ -297,9 +299,9 @@ + INFO = -2 + ELSE IF( N.LT.0 ) THEN + INFO = -4 +- ELSE IF( LDA.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDS.LT.MAX( 1, N ) ) THEN + INFO = -6 +- ELSE IF( LDB.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDP.LT.MAX( 1, N ) ) THEN + INFO = -8 + END IF + IF( INFO.NE.0 ) THEN +@@ -318,7 +320,7 @@ + GO TO 10 + END IF + IF( J.LT.N ) THEN +- IF( A( J+1, J ).NE.ZERO ) ++ IF( S( J+1, J ).NE.ZERO ) + $ ILCPLX = .TRUE. + END IF + IF( ILCPLX ) THEN +@@ -338,11 +340,11 @@ + ILABAD = .FALSE. + ILBBAD = .FALSE. + DO 20 J = 1, N - 1 +- IF( A( J+1, J ).NE.ZERO ) THEN +- IF( B( J, J ).EQ.ZERO .OR. B( J+1, J+1 ).EQ.ZERO .OR. +- $ B( J, J+1 ).NE.ZERO )ILBBAD = .TRUE. ++ IF( S( J+1, J ).NE.ZERO ) THEN ++ IF( P( J, J ).EQ.ZERO .OR. P( J+1, J+1 ).EQ.ZERO .OR. ++ $ P( J, J+1 ).NE.ZERO )ILBBAD = .TRUE. + IF( J.LT.N-1 ) THEN +- IF( A( J+2, J+1 ).NE.ZERO ) ++ IF( S( J+2, J+1 ).NE.ZERO ) + $ ILABAD = .TRUE. + END IF + END IF +@@ -385,30 +387,30 @@ + * blocks) of A and B to check for possible overflow in the + * triangular solver. 
+ * +- ANORM = ABS( A( 1, 1 ) ) ++ ANORM = ABS( S( 1, 1 ) ) + IF( N.GT.1 ) +- $ ANORM = ANORM + ABS( A( 2, 1 ) ) +- BNORM = ABS( B( 1, 1 ) ) ++ $ ANORM = ANORM + ABS( S( 2, 1 ) ) ++ BNORM = ABS( P( 1, 1 ) ) + WORK( 1 ) = ZERO + WORK( N+1 ) = ZERO + * + DO 50 J = 2, N + TEMP = ZERO + TEMP2 = ZERO +- IF( A( J, J-1 ).EQ.ZERO ) THEN ++ IF( S( J, J-1 ).EQ.ZERO ) THEN + IEND = J - 1 + ELSE + IEND = J - 2 + END IF + DO 30 I = 1, IEND +- TEMP = TEMP + ABS( A( I, J ) ) +- TEMP2 = TEMP2 + ABS( B( I, J ) ) ++ TEMP = TEMP + ABS( S( I, J ) ) ++ TEMP2 = TEMP2 + ABS( P( I, J ) ) + 30 CONTINUE + WORK( J ) = TEMP + WORK( N+J ) = TEMP2 + DO 40 I = IEND + 1, MIN( J+1, N ) +- TEMP = TEMP + ABS( A( I, J ) ) +- TEMP2 = TEMP2 + ABS( B( I, J ) ) ++ TEMP = TEMP + ABS( S( I, J ) ) ++ TEMP2 = TEMP2 + ABS( P( I, J ) ) + 40 CONTINUE + ANORM = MAX( ANORM, TEMP ) + BNORM = MAX( BNORM, TEMP2 ) +@@ -442,7 +444,7 @@ + END IF + NW = 1 + IF( JE.LT.N ) THEN +- IF( A( JE+1, JE ).NE.ZERO ) THEN ++ IF( S( JE+1, JE ).NE.ZERO ) THEN + ILCPLX = .TRUE. + NW = 2 + END IF +@@ -461,8 +463,8 @@ + * (c) complex eigenvalue. + * + IF( .NOT.ILCPLX ) THEN +- IF( ABS( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( B( JE, JE ) ).LE.SAFMIN ) THEN ++ IF( ABS( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( P( JE, JE ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- returns unit eigenvector + * +@@ -489,10 +491,10 @@ + * + * Real eigenvalue + * +- TEMP = ONE / MAX( ABS( A( JE, JE ) )*ASCALE, +- $ ABS( B( JE, JE ) )*BSCALE, SAFMIN ) +- SALFAR = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*B( JE, JE ) )*BSCALE ++ TEMP = ONE / MAX( ABS( S( JE, JE ) )*ASCALE, ++ $ ABS( P( JE, JE ) )*BSCALE, SAFMIN ) ++ SALFAR = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*P( JE, JE ) )*BSCALE + ACOEF = SBETA*ASCALE + BCOEFR = SALFAR*BSCALE + BCOEFI = ZERO +@@ -534,7 +536,7 @@ + * + * Complex eigenvalue + * +- CALL DLAG2( A( JE, JE ), LDA, B( JE, JE ), LDB, ++ CALL DLAG2( S( JE, JE ), LDS, P( JE, JE ), LDP, + $ SAFMIN*SAFETY, ACOEF, TEMP, BCOEFR, TEMP2, + $ BCOEFI ) + BCOEFI = -BCOEFI +@@ -566,9 +568,9 @@ + * + * Compute first two components of eigenvector + * +- TEMP = ACOEF*A( JE+1, JE ) +- TEMP2R = ACOEF*A( JE, JE ) - BCOEFR*B( JE, JE ) +- TEMP2I = -BCOEFI*B( JE, JE ) ++ TEMP = ACOEF*S( JE+1, JE ) ++ TEMP2R = ACOEF*S( JE, JE ) - BCOEFR*P( JE, JE ) ++ TEMP2I = -BCOEFI*P( JE, JE ) + IF( ABS( TEMP ).GT.ABS( TEMP2R )+ABS( TEMP2I ) ) THEN + WORK( 2*N+JE ) = ONE + WORK( 3*N+JE ) = ZERO +@@ -577,10 +579,10 @@ + ELSE + WORK( 2*N+JE+1 ) = ONE + WORK( 3*N+JE+1 ) = ZERO +- TEMP = ACOEF*A( JE, JE+1 ) +- WORK( 2*N+JE ) = ( BCOEFR*B( JE+1, JE+1 )-ACOEF* +- $ A( JE+1, JE+1 ) ) / TEMP +- WORK( 3*N+JE ) = BCOEFI*B( JE+1, JE+1 ) / TEMP ++ TEMP = ACOEF*S( JE, JE+1 ) ++ WORK( 2*N+JE ) = ( BCOEFR*P( JE+1, JE+1 )-ACOEF* ++ $ S( JE+1, JE+1 ) ) / TEMP ++ WORK( 3*N+JE ) = BCOEFI*P( JE+1, JE+1 ) / TEMP + END IF + XMAX = MAX( ABS( WORK( 2*N+JE ) )+ABS( WORK( 3*N+JE ) ), + $ ABS( WORK( 2*N+JE+1 ) )+ABS( WORK( 3*N+JE+1 ) ) ) +@@ -610,11 +612,11 @@ + END IF + * + NA = 1 +- BDIAG( 1 ) = B( J, J ) ++ BDIAG( 1 ) = P( J, J ) + IF( J.LT.N ) THEN +- IF( A( J+1, J ).NE.ZERO ) THEN ++ IF( S( J+1, J ).NE.ZERO ) THEN + IL2BY2 = .TRUE. 
+- BDIAG( 2 ) = B( J+1, J+1 ) ++ BDIAG( 2 ) = P( J+1, J+1 ) + NA = 2 + * ---------------- Begin Timing Code ---------------- + IN2BY2 = IN2BY2 + 1 +@@ -646,13 +648,13 @@ + * Compute dot products + * + * j-1 +-* SUM = sum conjg( a*A(k,j) - b*B(k,j) )*x(k) ++* SUM = sum conjg( a*S(k,j) - b*P(k,j) )*x(k) + * k=je + * + * To reduce the op count, this is done as + * + * _ j-1 _ j-1 +-* a*conjg( sum A(k,j)*x(k) ) - b*conjg( sum B(k,j)*x(k) ) ++* a*conjg( sum S(k,j)*x(k) ) - b*conjg( sum P(k,j)*x(k) ) + * k=je k=je + * + * which may cause underflow problems if A or B are close +@@ -689,15 +691,15 @@ + *$PL$ CMCHAR='*' + * + DO 110 JA = 1, NA +- SUMA( JA, JW ) = ZERO +- SUMB( JA, JW ) = ZERO ++ SUMS( JA, JW ) = ZERO ++ SUMP( JA, JW ) = ZERO + * + DO 100 JR = JE, J - 1 +- SUMA( JA, JW ) = SUMA( JA, JW ) + +- $ A( JR, J+JA-1 )* ++ SUMS( JA, JW ) = SUMS( JA, JW ) + ++ $ S( JR, J+JA-1 )* + $ WORK( ( JW+1 )*N+JR ) +- SUMB( JA, JW ) = SUMB( JA, JW ) + +- $ B( JR, J+JA-1 )* ++ SUMP( JA, JW ) = SUMP( JA, JW ) + ++ $ P( JR, J+JA-1 )* + $ WORK( ( JW+1 )*N+JR ) + 100 CONTINUE + 110 CONTINUE +@@ -717,15 +719,15 @@ + * + DO 130 JA = 1, NA + IF( ILCPLX ) THEN +- SUM( JA, 1 ) = -ACOEF*SUMA( JA, 1 ) + +- $ BCOEFR*SUMB( JA, 1 ) - +- $ BCOEFI*SUMB( JA, 2 ) +- SUM( JA, 2 ) = -ACOEF*SUMA( JA, 2 ) + +- $ BCOEFR*SUMB( JA, 2 ) + +- $ BCOEFI*SUMB( JA, 1 ) ++ SUM( JA, 1 ) = -ACOEF*SUMS( JA, 1 ) + ++ $ BCOEFR*SUMP( JA, 1 ) - ++ $ BCOEFI*SUMP( JA, 2 ) ++ SUM( JA, 2 ) = -ACOEF*SUMS( JA, 2 ) + ++ $ BCOEFR*SUMP( JA, 2 ) + ++ $ BCOEFI*SUMP( JA, 1 ) + ELSE +- SUM( JA, 1 ) = -ACOEF*SUMA( JA, 1 ) + +- $ BCOEFR*SUMB( JA, 1 ) ++ SUM( JA, 1 ) = -ACOEF*SUMS( JA, 1 ) + ++ $ BCOEFR*SUMP( JA, 1 ) + END IF + 130 CONTINUE + * +@@ -733,7 +735,7 @@ + * Solve ( a A - b B ) y = SUM(,) + * with scaling and perturbation of the denominator + * +- CALL DLALN2( .TRUE., NA, NW, DMIN, ACOEF, A( J, J ), LDA, ++ CALL DLALN2( .TRUE., NA, NW, DMIN, ACOEF, S( J, J ), LDS, + $ BDIAG( 1 ), BDIAG( 2 ), SUM, 2, BCOEFR, + $ BCOEFI, 
WORK( 2*N+J ), N, SCALE, TEMP, + $ IINFO ) +@@ -859,7 +861,7 @@ + END IF + NW = 1 + IF( JE.GT.1 ) THEN +- IF( A( JE, JE-1 ).NE.ZERO ) THEN ++ IF( S( JE, JE-1 ).NE.ZERO ) THEN + ILCPLX = .TRUE. + NW = 2 + END IF +@@ -878,8 +880,8 @@ + * (c) complex eigenvalue. + * + IF( .NOT.ILCPLX ) THEN +- IF( ABS( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( B( JE, JE ) ).LE.SAFMIN ) THEN ++ IF( ABS( S( JE, JE ) ).LE.SAFMIN .AND. ++ $ ABS( P( JE, JE ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- returns unit eigenvector + * +@@ -908,10 +910,10 @@ + * + * Real eigenvalue + * +- TEMP = ONE / MAX( ABS( A( JE, JE ) )*ASCALE, +- $ ABS( B( JE, JE ) )*BSCALE, SAFMIN ) +- SALFAR = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*B( JE, JE ) )*BSCALE ++ TEMP = ONE / MAX( ABS( S( JE, JE ) )*ASCALE, ++ $ ABS( P( JE, JE ) )*BSCALE, SAFMIN ) ++ SALFAR = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*P( JE, JE ) )*BSCALE + ACOEF = SBETA*ASCALE + BCOEFR = SALFAR*BSCALE + BCOEFI = ZERO +@@ -954,14 +956,14 @@ + * (See "Further Details", above.) 
+ * + DO 260 JR = 1, JE - 1 +- WORK( 2*N+JR ) = BCOEFR*B( JR, JE ) - +- $ ACOEF*A( JR, JE ) ++ WORK( 2*N+JR ) = BCOEFR*P( JR, JE ) - ++ $ ACOEF*S( JR, JE ) + 260 CONTINUE + ELSE + * + * Complex eigenvalue + * +- CALL DLAG2( A( JE-1, JE-1 ), LDA, B( JE-1, JE-1 ), LDB, ++ CALL DLAG2( S( JE-1, JE-1 ), LDS, P( JE-1, JE-1 ), LDP, + $ SAFMIN*SAFETY, ACOEF, TEMP, BCOEFR, TEMP2, + $ BCOEFI ) + IF( BCOEFI.EQ.ZERO ) THEN +@@ -993,9 +995,9 @@ + * Compute first two components of eigenvector + * and contribution to sums + * +- TEMP = ACOEF*A( JE, JE-1 ) +- TEMP2R = ACOEF*A( JE, JE ) - BCOEFR*B( JE, JE ) +- TEMP2I = -BCOEFI*B( JE, JE ) ++ TEMP = ACOEF*S( JE, JE-1 ) ++ TEMP2R = ACOEF*S( JE, JE ) - BCOEFR*P( JE, JE ) ++ TEMP2I = -BCOEFI*P( JE, JE ) + IF( ABS( TEMP ).GE.ABS( TEMP2R )+ABS( TEMP2I ) ) THEN + WORK( 2*N+JE ) = ONE + WORK( 3*N+JE ) = ZERO +@@ -1004,10 +1006,10 @@ + ELSE + WORK( 2*N+JE-1 ) = ONE + WORK( 3*N+JE-1 ) = ZERO +- TEMP = ACOEF*A( JE-1, JE ) +- WORK( 2*N+JE ) = ( BCOEFR*B( JE-1, JE-1 )-ACOEF* +- $ A( JE-1, JE-1 ) ) / TEMP +- WORK( 3*N+JE ) = BCOEFI*B( JE-1, JE-1 ) / TEMP ++ TEMP = ACOEF*S( JE-1, JE ) ++ WORK( 2*N+JE ) = ( BCOEFR*P( JE-1, JE-1 )-ACOEF* ++ $ S( JE-1, JE-1 ) ) / TEMP ++ WORK( 3*N+JE ) = BCOEFI*P( JE-1, JE-1 ) / TEMP + END IF + * + XMAX = MAX( ABS( WORK( 2*N+JE ) )+ABS( WORK( 3*N+JE ) ), +@@ -1027,12 +1029,12 @@ + CRE2B = BCOEFR*WORK( 2*N+JE ) - BCOEFI*WORK( 3*N+JE ) + CIM2B = BCOEFI*WORK( 2*N+JE ) + BCOEFR*WORK( 3*N+JE ) + DO 270 JR = 1, JE - 2 +- WORK( 2*N+JR ) = -CREALA*A( JR, JE-1 ) + +- $ CREALB*B( JR, JE-1 ) - +- $ CRE2A*A( JR, JE ) + CRE2B*B( JR, JE ) +- WORK( 3*N+JR ) = -CIMAGA*A( JR, JE-1 ) + +- $ CIMAGB*B( JR, JE-1 ) - +- $ CIM2A*A( JR, JE ) + CIM2B*B( JR, JE ) ++ WORK( 2*N+JR ) = -CREALA*S( JR, JE-1 ) + ++ $ CREALB*P( JR, JE-1 ) - ++ $ CRE2A*S( JR, JE ) + CRE2B*P( JR, JE ) ++ WORK( 3*N+JR ) = -CIMAGA*S( JR, JE-1 ) + ++ $ CIMAGB*P( JR, JE-1 ) - ++ $ CIM2A*S( JR, JE ) + CIM2B*P( JR, JE ) + 270 CONTINUE + END IF + * +@@ -1054,7 +1056,7 @@ + 
* next iteration to process it (when it will be j:j+1) + * + IF( .NOT.IL2BY2 .AND. J.GT.1 ) THEN +- IF( A( J, J-1 ).NE.ZERO ) THEN ++ IF( S( J, J-1 ).NE.ZERO ) THEN + IL2BY2 = .TRUE. + * -------------- Begin Timing Code ----------------- + IN2BY2 = IN2BY2 + 1 +@@ -1062,18 +1064,18 @@ + GO TO 370 + END IF + END IF +- BDIAG( 1 ) = B( J, J ) ++ BDIAG( 1 ) = P( J, J ) + IF( IL2BY2 ) THEN + NA = 2 +- BDIAG( 2 ) = B( J+1, J+1 ) ++ BDIAG( 2 ) = P( J+1, J+1 ) + ELSE + NA = 1 + END IF + * + * Compute x(j) (and x(j+1), if 2-by-2 block) + * +- CALL DLALN2( .FALSE., NA, NW, DMIN, ACOEF, A( J, J ), +- $ LDA, BDIAG( 1 ), BDIAG( 2 ), WORK( 2*N+J ), ++ CALL DLALN2( .FALSE., NA, NW, DMIN, ACOEF, S( J, J ), ++ $ LDS, BDIAG( 1 ), BDIAG( 2 ), WORK( 2*N+J ), + $ N, BCOEFR, BCOEFI, SUM, 2, SCALE, TEMP, + $ IINFO ) + IF( SCALE.LT.ONE ) THEN +@@ -1096,7 +1098,7 @@ + 300 CONTINUE + 310 CONTINUE + * +-* w = w + x(j)*(a A(*,j) - b B(*,j) ) with scaling ++* w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling + * + IF( J.GT.1 ) THEN + * +@@ -1137,19 +1139,19 @@ + $ BCOEFR*WORK( 3*N+J+JA-1 ) + DO 340 JR = 1, J - 1 + WORK( 2*N+JR ) = WORK( 2*N+JR ) - +- $ CREALA*A( JR, J+JA-1 ) + +- $ CREALB*B( JR, J+JA-1 ) ++ $ CREALA*S( JR, J+JA-1 ) + ++ $ CREALB*P( JR, J+JA-1 ) + WORK( 3*N+JR ) = WORK( 3*N+JR ) - +- $ CIMAGA*A( JR, J+JA-1 ) + +- $ CIMAGB*B( JR, J+JA-1 ) ++ $ CIMAGA*S( JR, J+JA-1 ) + ++ $ CIMAGB*P( JR, J+JA-1 ) + 340 CONTINUE + ELSE + CREALA = ACOEF*WORK( 2*N+J+JA-1 ) + CREALB = BCOEFR*WORK( 2*N+J+JA-1 ) + DO 350 JR = 1, J - 1 + WORK( 2*N+JR ) = WORK( 2*N+JR ) - +- $ CREALA*A( JR, J+JA-1 ) + +- $ CREALB*B( JR, J+JA-1 ) ++ $ CREALA*S( JR, J+JA-1 ) + ++ $ CREALB*P( JR, J+JA-1 ) + 350 CONTINUE + END IF + 360 CONTINUE +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/dtrevc.f LAPACK/TIMING/EIG/EIGSRC/dtrevc.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/dtrevc.f Thu Nov 4 14:28:33 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/dtrevc.f Fri May 25 16:21:00 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count 
operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 7, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +@@ -30,28 +30,23 @@ + * + * DTREVC computes some or all of the right and/or left eigenvectors of + * a real upper quasi-triangular matrix T. +-* ++* Matrices of this type are produced by the Schur factorization of ++* a real general matrix: A = Q*T*Q**T, as computed by DHSEQR. ++* + * The right eigenvector x and the left eigenvector y of T corresponding + * to an eigenvalue w are defined by: +-* +-* T*x = w*x, y'*T = w*y' +-* +-* where y' denotes the conjugate transpose of the vector y. +-* +-* If all eigenvectors are requested, the routine may either return the +-* matrices X and/or Y of right or left eigenvectors of T, or the +-* products Q*X and/or Q*Y, where Q is an input orthogonal +-* matrix. If T was obtained from the real-Schur factorization of an +-* original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of +-* right or left eigenvectors of A. +-* +-* T must be in Schur canonical form (as returned by DHSEQR), that is, +-* block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each +-* 2-by-2 diagonal block has its diagonal elements equal and its +-* off-diagonal elements of opposite sign. Corresponding to each 2-by-2 +-* diagonal block is a complex conjugate pair of eigenvalues and +-* eigenvectors; only one eigenvector of the pair is computed, namely +-* the one corresponding to the eigenvalue with positive imaginary part. ++* ++* T*x = w*x, (y**H)*T = w*(y**H) ++* ++* where y**H denotes the conjugate transpose of y. ++* The eigenvalues are not input to this routine, but are read directly ++* from the diagonal blocks of T. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an ++* input matrix. 
If Q is the orthogonal factor that reduces a matrix ++* A to Schur form T, then Q*X and Q*Y are the matrices of right and ++* left eigenvectors of A. + * + * Arguments + * ========= +@@ -64,21 +59,21 @@ + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; + * = 'B': compute all right and/or left eigenvectors, +-* and backtransform them using the input matrices +-* supplied in VR and/or VL; ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, +-* specified by the logical array SELECT. ++* as indicated by the logical array SELECT. + * + * SELECT (input/output) LOGICAL array, dimension (N) + * If HOWMNY = 'S', SELECT specifies the eigenvectors to be + * computed. +-* If HOWMNY = 'A' or 'B', SELECT is not referenced. +-* To select the real eigenvector corresponding to a real +-* eigenvalue w(j), SELECT(j) must be set to .TRUE.. To select +-* the complex eigenvector corresponding to a complex conjugate +-* pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must be +-* set to .TRUE.; then on exit SELECT(j) is .TRUE. and +-* SELECT(j+1) is .FALSE.. ++* If w(j) is a real eigenvalue, the corresponding real ++* eigenvector is computed if SELECT(j) is .TRUE.. ++* If w(j) and w(j+1) are the real and imaginary parts of a ++* complex eigenvalue, the corresponding complex eigenvector is ++* computed if either SELECT(j) or SELECT(j+1) is .TRUE., and ++* on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is set to ++* .FALSE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER + * The order of the matrix T. N >= 0. +@@ -95,15 +90,6 @@ + * of Schur vectors returned by DHSEQR). + * On exit, if SIDE = 'L' or 'B', VL contains: + * if HOWMNY = 'A', the matrix Y of left eigenvectors of T; +-* VL has the same quasi-lower triangular form +-* as T'. If T(i,i) is a real eigenvalue, then +-* the i-th column VL(i) of VL is its +-* corresponding eigenvector. 
If T(i:i+1,i:i+1) +-* is a 2-by-2 block whose eigenvalues are +-* complex-conjugate eigenvalues of T, then +-* VL(i)+sqrt(-1)*VL(i+1) is the complex +-* eigenvector corresponding to the eigenvalue +-* with positive real part. + * if HOWMNY = 'B', the matrix Q*Y; + * if HOWMNY = 'S', the left eigenvectors of T specified by + * SELECT, stored consecutively in the columns +@@ -112,11 +98,11 @@ + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part, and the second the imaginary part. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of the array VL. LDVL >= max(1,N) if +-* SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of the array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. + * + * VR (input/output) DOUBLE PRECISION array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +@@ -124,15 +110,6 @@ + * of Schur vectors returned by DHSEQR). + * On exit, if SIDE = 'R' or 'B', VR contains: + * if HOWMNY = 'A', the matrix X of right eigenvectors of T; +-* VR has the same quasi-upper triangular form +-* as T. If T(i,i) is a real eigenvalue, then +-* the i-th column VR(i) of VR is its +-* corresponding eigenvector. If T(i:i+1,i:i+1) +-* is a 2-by-2 block whose eigenvalues are +-* complex-conjugate eigenvalues of T, then +-* VR(i)+sqrt(-1)*VR(i+1) is the complex +-* eigenvector corresponding to the eigenvalue +-* with positive real part. + * if HOWMNY = 'B', the matrix Q*X; + * if HOWMNY = 'S', the right eigenvectors of T specified by + * SELECT, stored consecutively in the columns +@@ -141,11 +118,11 @@ + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part and the second the imaginary part. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. 
+ * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. LDVR >= max(1,N) if +-* SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/sbdsqr.f LAPACK/TIMING/EIG/EIGSRC/sbdsqr.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/sbdsqr.f Thu Nov 4 14:28:32 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/sbdsqr.f Fri May 25 16:19:49 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO +@@ -26,14 +26,26 @@ + * Purpose + * ======= + * +-* SBDSQR computes the singular value decomposition (SVD) of a real +-* N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P' +-* denotes the transpose of P), where S is a diagonal matrix with +-* non-negative diagonal elements (the singular values of B), and Q +-* and P are orthogonal matrices. ++* SBDSQR computes the singular values and, optionally, the right and/or ++* left singular vectors from the singular value decomposition (SVD) of ++* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit ++* zero-shift QR algorithm. The SVD of B has the form ++* ++* B = Q * S * P**T ++* ++* where S is the diagonal matrix of singular values, Q is an orthogonal ++* matrix of left singular vectors, and P is an orthogonal matrix of ++* right singular vectors. If left singular vectors are requested, this ++* subroutine actually returns U*Q instead of Q, and, if right singular ++* vectors are requested, this subroutine returns P**T*VT instead of ++* P**T, for given real input matrices U and VT. 
When U and VT are the ++* orthogonal matrices that reduce a general matrix A to bidiagonal ++* form: A = U*B*VT, as computed by SGEBRD, then + * +-* The routine computes S, and optionally computes U * Q, P' * VT, +-* or Q' * C, for given real input matrices U, VT, and C. ++* A = (U*Q) * S * (P**T*VT) ++* ++* is the SVD of A. Optionally, the subroutine may also compute Q**T*C ++* for a given real input matrix C. + * + * See "Computing Small Singular Values of Bidiagonal Matrices With + * Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, +@@ -69,18 +81,17 @@ + * order. + * + * E (input/output) REAL array, dimension (N) +-* On entry, the elements of E contain the +-* offdiagonal elements of the bidiagonal matrix whose SVD +-* is desired. On normal exit (INFO = 0), E is destroyed. +-* If the algorithm does not converge (INFO > 0), D and E ++* On entry, the N-1 offdiagonal elements of the bidiagonal ++* matrix B. ++* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E + * will contain the diagonal and superdiagonal elements of a + * bidiagonal matrix orthogonally equivalent to the one given + * as input. E(N) is used for workspace. + * + * VT (input/output) REAL array, dimension (LDVT, NCVT) + * On entry, an N-by-NCVT matrix VT. +-* On exit, VT is overwritten by P' * VT. +-* VT is not referenced if NCVT = 0. ++* On exit, VT is overwritten by P**T * VT. ++* Not referenced if NCVT = 0. + * + * LDVT (input) INTEGER + * The leading dimension of the array VT. +@@ -89,21 +100,22 @@ + * U (input/output) REAL array, dimension (LDU, N) + * On entry, an NRU-by-N matrix U. + * On exit, U is overwritten by U * Q. +-* U is not referenced if NRU = 0. ++* Not referenced if NRU = 0. + * + * LDU (input) INTEGER + * The leading dimension of the array U. LDU >= max(1,NRU). + * + * C (input/output) REAL array, dimension (LDC, NCC) + * On entry, an N-by-NCC matrix C. +-* On exit, C is overwritten by Q' * C. +-* C is not referenced if NCC = 0. 
++* On exit, C is overwritten by Q**T * C. ++* Not referenced if NCC = 0. + * + * LDC (input) INTEGER + * The leading dimension of the array C. + * LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. + * +-* WORK (workspace) REAL array, dimension (4*N) ++* WORK (workspace) REAL array, dimension (2*N) ++* if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise + * + * INFO (output) INTEGER + * = 0: successful exit +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/sgghrd.f LAPACK/TIMING/EIG/EIGSRC/sgghrd.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/sgghrd.f Thu Nov 4 14:28:29 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/sgghrd.f Fri May 25 16:20:10 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ +@@ -33,16 +33,32 @@ + * + * SGGHRD reduces a pair of real matrices (A,B) to generalized upper + * Hessenberg form using orthogonal transformations, where A is a +-* general matrix and B is upper triangular: Q' * A * Z = H and +-* Q' * B * Z = T, where H is upper Hessenberg, T is upper triangular, +-* and Q and Z are orthogonal, and ' means transpose. ++* general matrix and B is upper triangular. The form of the ++* generalized eigenvalue problem is ++* A*x = lambda*B*x, ++* and B is typically made upper triangular by computing its QR ++* factorization and moving the orthogonal matrix Q to the left side ++* of the equation. ++* ++* This subroutine simultaneously reduces A to a Hessenberg matrix H: ++* Q**T*A*Z = H ++* and transforms B to another upper triangular matrix T: ++* Q**T*B*Z = T ++* in order to reduce the problem to its standard form ++* H*y = lambda*T*y ++* where y = Z**T*x. + * + * The orthogonal matrices Q and Z are determined as products of Givens + * rotations. 
They may either be formed explicitly, or they may be + * postmultiplied into input matrices Q1 and Z1, so that + * +-* Q1 * A * Z1' = (Q1*Q) * H * (Z1*Z)' +-* Q1 * B * Z1' = (Q1*Q) * T * (Z1*Z)' ++* Q1 * A * Z1**T = (Q1*Q) * H * (Z1*Z)**T ++* ++* Q1 * B * Z1**T = (Q1*Q) * T * (Z1*Z)**T ++* ++* If Q1 is the orthogonal matrix from the QR factorization of B in the ++* original equation A*x = lambda*B*x, then SGGHRD reduces the original ++* problem to generalized Hessenberg form. + * + * Arguments + * ========= +@@ -66,10 +82,11 @@ + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set +-* by a previous call to SGGBAL; otherwise they should be set +-* to 1 and N respectively. ++* ILO and IHI mark the rows and columns of A which are to be ++* reduced. It is assumed that A is already upper triangular ++* in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are ++* normally set by a previous call to SGGBAL; otherwise they ++* should be set to 1 and N respectively. + * 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. + * + * A (input/output) REAL array, dimension (LDA, N) +@@ -83,33 +100,28 @@ + * + * B (input/output) REAL array, dimension (LDB, N) + * On entry, the N-by-N upper triangular matrix B. +-* On exit, the upper triangular matrix T = Q' B Z. The ++* On exit, the upper triangular matrix T = Q**T B Z. The + * elements below the diagonal are set to zero. + * + * LDB (input) INTEGER + * The leading dimension of the array B. LDB >= max(1,N). + * + * Q (input/output) REAL array, dimension (LDQ, N) +-* If COMPQ='N': Q is not referenced. +-* If COMPQ='I': on entry, Q need not be set, and on exit it +-* contains the orthogonal matrix Q, where Q' +-* is the product of the Givens transformations +-* which are applied to A and B on the left. 
+-* If COMPQ='V': on entry, Q must contain an orthogonal matrix +-* Q1, and on exit this is overwritten by Q1*Q. ++* On entry, if COMPQ = 'V', the orthogonal matrix Q1, ++* typically from the QR factorization of B. ++* On exit, if COMPQ='I', the orthogonal matrix Q, and if ++* COMPQ = 'V', the product Q1*Q. ++* Not referenced if COMPQ='N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. + * LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. + * + * Z (input/output) REAL array, dimension (LDZ, N) +-* If COMPZ='N': Z is not referenced. +-* If COMPZ='I': on entry, Z need not be set, and on exit it +-* contains the orthogonal matrix Z, which is +-* the product of the Givens transformations +-* which are applied to A and B on the right. +-* If COMPZ='V': on entry, Z must contain an orthogonal matrix +-* Z1, and on exit this is overwritten by Z1*Z. ++* On entry, if COMPZ = 'V', the orthogonal matrix Z1. ++* On exit, if COMPZ='I', the orthogonal matrix Z, and if ++* COMPZ = 'V', the product Z1*Z. ++* Not referenced if COMPZ='N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/shgeqz.f LAPACK/TIMING/EIG/EIGSRC/shgeqz.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/shgeqz.f Thu Nov 4 14:28:33 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/shgeqz.f Fri May 25 16:20:29 2001 +@@ -1,20 +1,20 @@ +- SUBROUTINE SHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, A, LDA, B, LDB, ++ SUBROUTINE SHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, H, LDH, T, LDT, + $ ALPHAR, ALPHAI, BETA, Q, LDQ, Z, LDZ, WORK, + $ LWORK, INFO ) + * + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ, JOB +- INTEGER IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, LWORK, N ++ INTEGER IHI, ILO, INFO, LDH, LDQ, LDT, LDZ, LWORK, N + * .. + * .. 
Array Arguments .. +- REAL A( LDA, * ), ALPHAI( * ), ALPHAR( * ), +- $ B( LDB, * ), BETA( * ), Q( LDQ, * ), WORK( * ), +- $ Z( LDZ, * ) ++ REAL ALPHAI( * ), ALPHAR( * ), BETA( * ), ++ $ H( LDH, * ), Q( LDQ, * ), T( LDT, * ), ++ $ WORK( * ), Z( LDZ, * ) + * .. + * ---------------------- Begin Timing Code ------------------------- + * Common block to return operation count and iteration count +@@ -32,37 +32,56 @@ + * Purpose + * ======= + * +-* SHGEQZ implements a single-/double-shift version of the QZ method for +-* finding the generalized eigenvalues +-* +-* w(j)=(ALPHAR(j) + i*ALPHAI(j))/BETAR(j) of the equation +-* +-* det( A - w(i) B ) = 0 +-* +-* In addition, the pair A,B may be reduced to generalized Schur form: +-* B is upper triangular, and A is block upper triangular, where the +-* diagonal blocks are either 1-by-1 or 2-by-2, the 2-by-2 blocks having +-* complex generalized eigenvalues (see the description of the argument +-* JOB.) +-* +-* If JOB='S', then the pair (A,B) is simultaneously reduced to Schur +-* form by applying one orthogonal tranformation (usually called Q) on +-* the left and another (usually called Z) on the right. The 2-by-2 +-* upper-triangular diagonal blocks of B corresponding to 2-by-2 blocks +-* of A will be reduced to positive diagonal matrices. (I.e., +-* if A(j+1,j) is non-zero, then B(j+1,j)=B(j,j+1)=0 and B(j,j) and +-* B(j+1,j+1) will be positive.) +-* +-* If JOB='E', then at each iteration, the same transformations +-* are computed, but they are only applied to those parts of A and B +-* which are needed to compute ALPHAR, ALPHAI, and BETAR. 
+-* +-* If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the orthogonal +-* transformations used to reduce (A,B) are accumulated into the arrays +-* Q and Z s.t.: +-* +-* Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)* +-* Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)* ++* SHGEQZ computes the eigenvalues of a real matrix pair (H,T), ++* where H is an upper Hessenberg matrix and T is upper triangular, ++* using the double-shift QZ method. ++* Matrix pairs of this type are produced by the reduction to ++* generalized upper Hessenberg form of a real matrix pair (A,B): ++* ++* A = Q1*H*Z1**T, B = Q1*T*Z1**T, ++* ++* as computed by SGGHRD. ++* ++* If JOB='S', then the Hessenberg-triangular pair (H,T) is ++* also reduced to generalized Schur form, ++* ++* H = Q*S*Z**T, T = Q*P*Z**T, ++* ++* where Q and Z are orthogonal matrices, P is an upper triangular ++* matrix, and S is a quasi-triangular matrix with 1-by-1 and 2-by-2 ++* diagonal blocks. ++* ++* The 1-by-1 blocks correspond to real eigenvalues of the matrix pair ++* (H,T) and the 2-by-2 blocks correspond to complex conjugate pairs of ++* eigenvalues. ++* ++* Additionally, the 2-by-2 upper triangular diagonal blocks of P ++* corresponding to 2-by-2 blocks of S are reduced to positive diagonal ++* form, i.e., if S(j+1,j) is non-zero, then P(j+1,j) = P(j,j+1) = 0, ++* P(j,j) > 0, and P(j+1,j+1) > 0. ++* ++* Optionally, the orthogonal matrix Q from the generalized Schur ++* factorization may be postmultiplied into an input matrix Q1, and the ++* orthogonal matrix Z may be postmultiplied into an input matrix Z1. ++* If Q1 and Z1 are the orthogonal matrices from SGGHRD that reduced ++* the matrix pair (A,B) to generalized upper Hessenberg form, then the ++* output matrices Q1*Q and Z1*Z are the orthogonal factors from the ++* generalized Schur factorization of (A,B): ++* ++* A = (Q1*Q)*S*(Z1*Z)**T, B = (Q1*Q)*P*(Z1*Z)**T. 
++* ++* To avoid overflow, eigenvalues of the matrix pair (H,T) (equivalently, ++* of (A,B)) are computed as a pair of values (alpha,beta), where alpha is ++* complex and beta real. ++* If beta is nonzero, lambda = alpha / beta is an eigenvalue of the ++* generalized nonsymmetric eigenvalue problem (GNEP) ++* A*x = lambda*B*x ++* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the ++* alternate form of the GNEP ++* mu*A*y = B*y. ++* Real eigenvalues can be read directly from the generalized Schur ++* form: ++* alpha = S(i,i), beta = P(i,i). + * + * Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix + * Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), +@@ -72,114 +91,98 @@ + * ========= + * + * JOB (input) CHARACTER*1 +-* = 'E': compute only ALPHAR, ALPHAI, and BETA. A and B will +-* not necessarily be put into generalized Schur form. +-* = 'S': put A and B into generalized Schur form, as well +-* as computing ALPHAR, ALPHAI, and BETA. ++* = 'E': Compute eigenvalues only; ++* = 'S': Compute eigenvalues and the Schur form. + * + * COMPQ (input) CHARACTER*1 +-* = 'N': do not modify Q. +-* = 'V': multiply the array Q on the right by the transpose of +-* the orthogonal tranformation that is applied to the +-* left side of A and B to reduce them to Schur form. +-* = 'I': like COMPQ='V', except that Q will be initialized to +-* the identity first. ++* = 'N': Left Schur vectors (Q) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Q ++* of left Schur vectors of (H,T) is returned; ++* = 'V': Q must contain an orthogonal matrix Q1 on entry and ++* the product Q1*Q is returned. + * + * COMPZ (input) CHARACTER*1 +-* = 'N': do not modify Z. +-* = 'V': multiply the array Z on the right by the orthogonal +-* tranformation that is applied to the right side of +-* A and B to reduce them to Schur form. +-* = 'I': like COMPZ='V', except that Z will be initialized to +-* the identity first. 
++* = 'N': Right Schur vectors (Z) are not computed; ++* = 'I': Z is initialized to the unit matrix and the matrix Z ++* of right Schur vectors of (H,T) is returned; ++* = 'V': Z must contain an orthogonal matrix Z1 on entry and ++* the product Z1*Z is returned. + * + * N (input) INTEGER +-* The order of the matrices A, B, Q, and Z. N >= 0. ++* The order of the matrices H, T, Q, and Z. N >= 0. + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. +-* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. +-* +-* A (input/output) REAL array, dimension (LDA, N) +-* On entry, the N-by-N upper Hessenberg matrix A. Elements +-* below the subdiagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to generalized Schur form. +-* If JOB='E', then on exit A will have been destroyed. +-* The diagonal blocks will be correct, but the off-diagonal +-* portion will be meaningless. +-* +-* LDA (input) INTEGER +-* The leading dimension of the array A. LDA >= max( 1, N ). +-* +-* B (input/output) REAL array, dimension (LDB, N) +-* On entry, the N-by-N upper triangular matrix B. Elements +-* below the diagonal must be zero. 2-by-2 blocks in B +-* corresponding to 2-by-2 blocks in A will be reduced to +-* positive diagonal form. (I.e., if A(j+1,j) is non-zero, +-* then B(j+1,j)=B(j,j+1)=0 and B(j,j) and B(j+1,j+1) will be +-* positive.) +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to Schur form. +-* If JOB='E', then on exit B will have been destroyed. +-* Elements corresponding to diagonal blocks of A will be +-* correct, but the off-diagonal portion will be meaningless. ++* ILO and IHI mark the rows and columns of H which are in ++* Hessenberg form. It is assumed that A is already upper ++* triangular in rows and columns 1:ILO-1 and IHI+1:N. ++* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. 
++* ++* H (input/output) REAL array, dimension (LDH, N) ++* On entry, the N-by-N upper Hessenberg matrix H. ++* On exit, if JOB = 'S', H contains the upper quasi-triangular ++* matrix S from the generalized Schur factorization; ++* 2-by-2 diagonal blocks (corresponding to complex conjugate ++* pairs of eigenvalues) are returned in standard form, with ++* H(i,i) = H(i+1,i+1) and H(i+1,i)*H(i,i+1) < 0. ++* If JOB = 'E', the diagonal blocks of H match those of S, but ++* the rest of H is unspecified. ++* ++* LDH (input) INTEGER ++* The leading dimension of the array H. LDH >= max( 1, N ). ++* ++* T (input/output) REAL array, dimension (LDT, N) ++* On entry, the N-by-N upper triangular matrix T. ++* On exit, if JOB = 'S', T contains the upper triangular ++* matrix P from the generalized Schur factorization; ++* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks of S ++* are reduced to positive diagonal form, i.e., if H(j+1,j) is ++* non-zero, then T(j+1,j) = T(j,j+1) = 0, T(j,j) > 0, and ++* T(j+1,j+1) > 0. ++* If JOB = 'E', the diagonal blocks of T match those of P, but ++* the rest of T is unspecified. + * +-* LDB (input) INTEGER +-* The leading dimension of the array B. LDB >= max( 1, N ). ++* LDT (input) INTEGER ++* The leading dimension of the array T. LDT >= max( 1, N ). + * + * ALPHAR (output) REAL array, dimension (N) +-* ALPHAR(1:N) will be set to real parts of the diagonal +-* elements of A that would result from reducing A and B to +-* Schur form and then further reducing them both to triangular +-* form using unitary transformations s.t. the diagonal of B +-* was non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then ALPHAR(j)=A(j,j). +-* Note that the (real or complex) values +-* (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the +-* generalized eigenvalues of the matrix pencil A - wB. ++* The real parts of each scalar alpha defining an eigenvalue ++* of GNEP. 
+ * + * ALPHAI (output) REAL array, dimension (N) +-* ALPHAI(1:N) will be set to imaginary parts of the diagonal +-* elements of A that would result from reducing A and B to +-* Schur form and then further reducing them both to triangular +-* form using unitary transformations s.t. the diagonal of B +-* was non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then ALPHAR(j)=0. +-* Note that the (real or complex) values +-* (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the +-* generalized eigenvalues of the matrix pencil A - wB. ++* The imaginary parts of each scalar alpha defining an ++* eigenvalue of GNEP. ++* If ALPHAI(j) is zero, then the j-th eigenvalue is real; if ++* positive, then the j-th and (j+1)-st eigenvalues are a ++* complex conjugate pair, with ALPHAI(j+1) = -ALPHAI(j). + * + * BETA (output) REAL array, dimension (N) +-* BETA(1:N) will be set to the (real) diagonal elements of B +-* that would result from reducing A and B to Schur form and +-* then further reducing them both to triangular form using +-* unitary transformations s.t. the diagonal of B was +-* non-negative real. Thus, if A(j,j) is in a 1-by-1 block +-* (i.e., A(j+1,j)=A(j,j+1)=0), then BETA(j)=B(j,j). +-* Note that the (real or complex) values +-* (ALPHAR(j) + i*ALPHAI(j))/BETA(j), j=1,...,N, are the +-* generalized eigenvalues of the matrix pencil A - wB. +-* (Note that BETA(1:N) will always be non-negative, and no +-* BETAI is necessary.) ++* The scalars beta that define the eigenvalues of GNEP. ++* Together, the quantities alpha = (ALPHAR(j),ALPHAI(j)) and ++* beta = BETA(j) represent the j-th eigenvalue of the matrix ++* pair (A,B), in one of the forms lambda = alpha/beta or ++* mu = beta/alpha. Since either lambda or mu may overflow, ++* they should not, in general, be computed. + * + * Q (input/output) REAL array, dimension (LDQ, N) +-* If COMPQ='N', then Q will not be referenced. 
+-* If COMPQ='V' or 'I', then the transpose of the orthogonal +-* transformations which are applied to A and B on the left +-* will be applied to the array Q on the right. ++* On entry, if COMPZ = 'V', the orthogonal matrix Q1 used in ++* the reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the orthogonal matrix of left Schur ++* vectors of (H,T), and if COMPZ = 'V', the orthogonal matrix ++* of left Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. LDQ >= 1. + * If COMPQ='V' or 'I', then LDQ >= N. + * + * Z (input/output) REAL array, dimension (LDZ, N) +-* If COMPZ='N', then Z will not be referenced. +-* If COMPZ='V' or 'I', then the orthogonal transformations +-* which are applied to A and B on the right will be applied +-* to the array Z on the right. ++* On entry, if COMPZ = 'V', the orthogonal matrix Z1 used in ++* the reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the orthogonal matrix of ++* right Schur vectors of (H,T), and if COMPZ = 'V', the ++* orthogonal matrix of right Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. LDZ >= 1. +@@ -199,13 +202,12 @@ + * INFO (output) INTEGER + * = 0: successful exit + * < 0: if INFO = -i, the i-th argument had an illegal value +-* = 1,...,N: the QZ iteration did not converge. (A,B) is not ++* = 1,...,N: the QZ iteration did not converge. (H,T) is not + * in Schur form, but ALPHAR(i), ALPHAI(i), and + * BETA(i), i=INFO+1,...,N should be correct. +-* = N+1,...,2*N: the shift calculation failed. (A,B) is not ++* = N+1,...,2*N: the shift calculation failed. (H,T) is not + * in Schur form, but ALPHAR(i), ALPHAI(i), and + * BETA(i), i=INFO-N+1,...,N should be correct. +-* > 2*N: various "impossible" errors. 
+ * + * Further Details + * =============== +@@ -237,7 +239,7 @@ + $ BTOL, C, C11I, C11R, C12, C21, C22I, C22R, CL, + $ CQ, CR, CZ, ESHIFT, OPST, S, S1, S1INV, S2, + $ SAFMAX, SAFMIN, SCALE, SL, SQI, SQR, SR, SZI, +- $ SZR, T, TAU, TEMP, TEMP2, TEMPI, TEMPR, U1, ++ $ SZR, T1, TAU, TEMP, TEMP2, TEMPI, TEMPR, U1, + $ U12, U12L, U2, ULP, VS, W11, W12, W21, W22, + $ WABS, WI, WR, WR2 + * .. +@@ -319,9 +321,9 @@ + INFO = -5 + ELSE IF( IHI.GT.N .OR. IHI.LT.ILO-1 ) THEN + INFO = -6 +- ELSE IF( LDA.LT.N ) THEN ++ ELSE IF( LDH.LT.N ) THEN + INFO = -8 +- ELSE IF( LDB.LT.N ) THEN ++ ELSE IF( LDT.LT.N ) THEN + INFO = -10 + ELSE IF( LDQ.LT.1 .OR. ( ILQ .AND. LDQ.LT.N ) ) THEN + INFO = -15 +@@ -360,8 +362,8 @@ + SAFMIN = SLAMCH( 'S' ) + SAFMAX = ONE / SAFMIN + ULP = SLAMCH( 'E' )*SLAMCH( 'B' ) +- ANORM = SLANHS( 'F', IN, A( ILO, ILO ), LDA, WORK ) +- BNORM = SLANHS( 'F', IN, B( ILO, ILO ), LDB, WORK ) ++ ANORM = SLANHS( 'F', IN, H( ILO, ILO ), LDH, WORK ) ++ BNORM = SLANHS( 'F', IN, T( ILO, ILO ), LDT, WORK ) + ATOL = MAX( SAFMIN, ULP*ANORM ) + BTOL = MAX( SAFMIN, ULP*BNORM ) + ASCALE = ONE / MAX( SAFMIN, ANORM ) +@@ -370,15 +372,15 @@ + * Set Eigenvalues IHI+1:N + * + DO 30 J = IHI + 1, N +- IF( B( J, J ).LT.ZERO ) THEN ++ IF( T( J, J ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 10 JR = 1, J +- A( JR, J ) = -A( JR, J ) +- B( JR, J ) = -B( JR, J ) ++ H( JR, J ) = -H( JR, J ) ++ T( JR, J ) = -T( JR, J ) + 10 CONTINUE + ELSE +- A( J, J ) = -A( J, J ) +- B( J, J ) = -B( J, J ) ++ H( J, J ) = -H( J, J ) ++ T( J, J ) = -T( J, J ) + END IF + IF( ILZ ) THEN + DO 20 JR = 1, N +@@ -386,9 +388,9 @@ + 20 CONTINUE + END IF + END IF +- ALPHAR( J ) = A( J, J ) ++ ALPHAR( J ) = H( J, J ) + ALPHAI( J ) = ZERO +- BETA( J ) = B( J, J ) ++ BETA( J ) = T( J, J ) + 30 CONTINUE + * + * ---------------------- Begin Timing Code ------------------------- +@@ -435,8 +437,8 @@ + * Split the matrix if possible. 
+ * + * Two tests: +-* 1: A(j,j-1)=0 or j=ILO +-* 2: B(j,j)=0 ++* 1: H(j,j-1)=0 or j=ILO ++* 2: T(j,j)=0 + * + IF( ILAST.EQ.ILO ) THEN + * +@@ -444,14 +446,14 @@ + * + GO TO 80 + ELSE +- IF( ABS( A( ILAST, ILAST-1 ) ).LE.ATOL ) THEN +- A( ILAST, ILAST-1 ) = ZERO ++ IF( ABS( H( ILAST, ILAST-1 ) ).LE.ATOL ) THEN ++ H( ILAST, ILAST-1 ) = ZERO + GO TO 80 + END IF + END IF + * +- IF( ABS( B( ILAST, ILAST ) ).LE.BTOL ) THEN +- B( ILAST, ILAST ) = ZERO ++ IF( ABS( T( ILAST, ILAST ) ).LE.BTOL ) THEN ++ T( ILAST, ILAST ) = ZERO + GO TO 70 + END IF + * +@@ -459,36 +461,36 @@ + * + DO 60 J = ILAST - 1, ILO, -1 + * +-* Test 1: for A(j,j-1)=0 or j=ILO ++* Test 1: for H(j,j-1)=0 or j=ILO + * + IF( J.EQ.ILO ) THEN + ILAZRO = .TRUE. + ELSE +- IF( ABS( A( J, J-1 ) ).LE.ATOL ) THEN +- A( J, J-1 ) = ZERO ++ IF( ABS( H( J, J-1 ) ).LE.ATOL ) THEN ++ H( J, J-1 ) = ZERO + ILAZRO = .TRUE. + ELSE + ILAZRO = .FALSE. + END IF + END IF + * +-* Test 2: for B(j,j)=0 ++* Test 2: for T(j,j)=0 + * +- IF( ABS( B( J, J ) ).LT.BTOL ) THEN +- B( J, J ) = ZERO ++ IF( ABS( T( J, J ) ).LT.BTOL ) THEN ++ T( J, J ) = ZERO + * + * Test 1a: Check for 2 consecutive small subdiagonals in A + * + ILAZR2 = .FALSE. + IF( .NOT.ILAZRO ) THEN +- TEMP = ABS( A( J, J-1 ) ) +- TEMP2 = ABS( A( J, J ) ) ++ TEMP = ABS( H( J, J-1 ) ) ++ TEMP2 = ABS( H( J, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( TEMP*( ASCALE*ABS( A( J+1, J ) ) ).LE.TEMP2* ++ IF( TEMP*( ASCALE*ABS( H( J+1, J ) ) ).LE.TEMP2* + $ ( ASCALE*ATOL ) )ILAZR2 = .TRUE. + END IF + * +@@ -500,26 +502,26 @@ + * + IF( ILAZRO .OR. 
ILAZR2 ) THEN + DO 40 JCH = J, ILAST - 1 +- TEMP = A( JCH, JCH ) +- CALL SLARTG( TEMP, A( JCH+1, JCH ), C, S, +- $ A( JCH, JCH ) ) +- A( JCH+1, JCH ) = ZERO +- CALL SROT( ILASTM-JCH, A( JCH, JCH+1 ), LDA, +- $ A( JCH+1, JCH+1 ), LDA, C, S ) +- CALL SROT( ILASTM-JCH, B( JCH, JCH+1 ), LDB, +- $ B( JCH+1, JCH+1 ), LDB, C, S ) ++ TEMP = H( JCH, JCH ) ++ CALL SLARTG( TEMP, H( JCH+1, JCH ), C, S, ++ $ H( JCH, JCH ) ) ++ H( JCH+1, JCH ) = ZERO ++ CALL SROT( ILASTM-JCH, H( JCH, JCH+1 ), LDH, ++ $ H( JCH+1, JCH+1 ), LDH, C, S ) ++ CALL SROT( ILASTM-JCH, T( JCH, JCH+1 ), LDT, ++ $ T( JCH+1, JCH+1 ), LDT, C, S ) + IF( ILQ ) + $ CALL SROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, S ) + IF( ILAZR2 ) +- $ A( JCH, JCH-1 ) = A( JCH, JCH-1 )*C ++ $ H( JCH, JCH-1 ) = H( JCH, JCH-1 )*C + ILAZR2 = .FALSE. + * + * --------------- Begin Timing Code ----------------- + OPST = OPST + REAL( 7+12*( ILASTM-JCH )+6*NQ ) + * ---------------- End Timing Code ------------------ + * +- IF( ABS( B( JCH+1, JCH+1 ) ).GE.BTOL ) THEN ++ IF( ABS( T( JCH+1, JCH+1 ) ).GE.BTOL ) THEN + IF( JCH+1.GE.ILAST ) THEN + GO TO 80 + ELSE +@@ -527,35 +529,35 @@ + GO TO 110 + END IF + END IF +- B( JCH+1, JCH+1 ) = ZERO ++ T( JCH+1, JCH+1 ) = ZERO + 40 CONTINUE + GO TO 70 + ELSE + * +-* Only test 2 passed -- chase the zero to B(ILAST,ILAST) +-* Then process as in the case B(ILAST,ILAST)=0 ++* Only test 2 passed -- chase the zero to T(ILAST,ILAST) ++* Then process as in the case T(ILAST,ILAST)=0 + * + DO 50 JCH = J, ILAST - 1 +- TEMP = B( JCH, JCH+1 ) +- CALL SLARTG( TEMP, B( JCH+1, JCH+1 ), C, S, +- $ B( JCH, JCH+1 ) ) +- B( JCH+1, JCH+1 ) = ZERO ++ TEMP = T( JCH, JCH+1 ) ++ CALL SLARTG( TEMP, T( JCH+1, JCH+1 ), C, S, ++ $ T( JCH, JCH+1 ) ) ++ T( JCH+1, JCH+1 ) = ZERO + IF( JCH.LT.ILASTM-1 ) +- $ CALL SROT( ILASTM-JCH-1, B( JCH, JCH+2 ), LDB, +- $ B( JCH+1, JCH+2 ), LDB, C, S ) +- CALL SROT( ILASTM-JCH+2, A( JCH, JCH-1 ), LDA, +- $ A( JCH+1, JCH-1 ), LDA, C, S ) ++ $ CALL SROT( ILASTM-JCH-1, T( JCH, JCH+2 ), LDT, 
++ $ T( JCH+1, JCH+2 ), LDT, C, S ) ++ CALL SROT( ILASTM-JCH+2, H( JCH, JCH-1 ), LDH, ++ $ H( JCH+1, JCH-1 ), LDH, C, S ) + IF( ILQ ) + $ CALL SROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, S ) +- TEMP = A( JCH+1, JCH ) +- CALL SLARTG( TEMP, A( JCH+1, JCH-1 ), C, S, +- $ A( JCH+1, JCH ) ) +- A( JCH+1, JCH-1 ) = ZERO +- CALL SROT( JCH+1-IFRSTM, A( IFRSTM, JCH ), 1, +- $ A( IFRSTM, JCH-1 ), 1, C, S ) +- CALL SROT( JCH-IFRSTM, B( IFRSTM, JCH ), 1, +- $ B( IFRSTM, JCH-1 ), 1, C, S ) ++ TEMP = H( JCH+1, JCH ) ++ CALL SLARTG( TEMP, H( JCH+1, JCH-1 ), C, S, ++ $ H( JCH+1, JCH ) ) ++ H( JCH+1, JCH-1 ) = ZERO ++ CALL SROT( JCH+1-IFRSTM, H( IFRSTM, JCH ), 1, ++ $ H( IFRSTM, JCH-1 ), 1, C, S ) ++ CALL SROT( JCH-IFRSTM, T( IFRSTM, JCH ), 1, ++ $ T( IFRSTM, JCH-1 ), 1, C, S ) + IF( ILZ ) + $ CALL SROT( N, Z( 1, JCH ), 1, Z( 1, JCH-1 ), 1, + $ C, S ) +@@ -585,18 +587,18 @@ + INFO = N + 1 + GO TO 420 + * +-* B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a ++* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a + * 1x1 block. 
+ * + 70 CONTINUE +- TEMP = A( ILAST, ILAST ) +- CALL SLARTG( TEMP, A( ILAST, ILAST-1 ), C, S, +- $ A( ILAST, ILAST ) ) +- A( ILAST, ILAST-1 ) = ZERO +- CALL SROT( ILAST-IFRSTM, A( IFRSTM, ILAST ), 1, +- $ A( IFRSTM, ILAST-1 ), 1, C, S ) +- CALL SROT( ILAST-IFRSTM, B( IFRSTM, ILAST ), 1, +- $ B( IFRSTM, ILAST-1 ), 1, C, S ) ++ TEMP = H( ILAST, ILAST ) ++ CALL SLARTG( TEMP, H( ILAST, ILAST-1 ), C, S, ++ $ H( ILAST, ILAST ) ) ++ H( ILAST, ILAST-1 ) = ZERO ++ CALL SROT( ILAST-IFRSTM, H( IFRSTM, ILAST ), 1, ++ $ H( IFRSTM, ILAST-1 ), 1, C, S ) ++ CALL SROT( ILAST-IFRSTM, T( IFRSTM, ILAST ), 1, ++ $ T( IFRSTM, ILAST-1 ), 1, C, S ) + IF( ILZ ) + $ CALL SROT( N, Z( 1, ILAST ), 1, Z( 1, ILAST-1 ), 1, C, S ) + * +@@ -605,19 +607,19 @@ + * ---------------------- End Timing Code ------------------------ + * + * +-* A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHAR, ALPHAI, ++* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHAR, ALPHAI, + * and BETA + * + 80 CONTINUE +- IF( B( ILAST, ILAST ).LT.ZERO ) THEN ++ IF( T( ILAST, ILAST ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 90 J = IFRSTM, ILAST +- A( J, ILAST ) = -A( J, ILAST ) +- B( J, ILAST ) = -B( J, ILAST ) ++ H( J, ILAST ) = -H( J, ILAST ) ++ T( J, ILAST ) = -T( J, ILAST ) + 90 CONTINUE + ELSE +- A( ILAST, ILAST ) = -A( ILAST, ILAST ) +- B( ILAST, ILAST ) = -B( ILAST, ILAST ) ++ H( ILAST, ILAST ) = -H( ILAST, ILAST ) ++ T( ILAST, ILAST ) = -T( ILAST, ILAST ) + END IF + IF( ILZ ) THEN + DO 100 J = 1, N +@@ -625,9 +627,9 @@ + 100 CONTINUE + END IF + END IF +- ALPHAR( ILAST ) = A( ILAST, ILAST ) ++ ALPHAR( ILAST ) = H( ILAST, ILAST ) + ALPHAI( ILAST ) = ZERO +- BETA( ILAST ) = B( ILAST, ILAST ) ++ BETA( ILAST ) = T( ILAST, ILAST ) + * + * Go to next block -- exit if finished. + * +@@ -660,7 +662,7 @@ + * Compute single shifts. 
+ * + * At this point, IFIRST < ILAST, and the diagonal elements of +-* B(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in ++* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in + * magnitude) + * + IF( ( IITER / 10 )*10.EQ.IITER ) THEN +@@ -668,10 +670,10 @@ + * Exceptional shift. Chosen for no particularly good reason. + * (Single shift only.) + * +- IF( ( REAL( MAXIT )*SAFMIN )*ABS( A( ILAST-1, ILAST ) ).LT. +- $ ABS( B( ILAST-1, ILAST-1 ) ) ) THEN +- ESHIFT = ESHIFT + A( ILAST-1, ILAST ) / +- $ B( ILAST-1, ILAST-1 ) ++ IF( ( REAL( MAXIT )*SAFMIN )*ABS( H( ILAST-1, ILAST ) ).LT. ++ $ ABS( T( ILAST-1, ILAST-1 ) ) ) THEN ++ ESHIFT = ESHIFT + H( ILAST-1, ILAST ) / ++ $ T( ILAST-1, ILAST-1 ) + ELSE + ESHIFT = ESHIFT + ONE / ( SAFMIN*REAL( MAXIT ) ) + END IF +@@ -688,8 +690,8 @@ + * bottom-right 2x2 block of A and B. The first eigenvalue + * returned by SLAG2 is the Wilkinson shift (AEP p.512), + * +- CALL SLAG2( A( ILAST-1, ILAST-1 ), LDA, +- $ B( ILAST-1, ILAST-1 ), LDB, SAFMIN*SAFETY, S1, ++ CALL SLAG2( H( ILAST-1, ILAST-1 ), LDH, ++ $ T( ILAST-1, ILAST-1 ), LDT, SAFMIN*SAFETY, S1, + $ S2, WR, WR2, WI ) + * + TEMP = MAX( S1, SAFMIN*MAX( ONE, ABS( WR ), ABS( WI ) ) ) +@@ -721,14 +723,14 @@ + * + DO 120 J = ILAST - 1, IFIRST + 1, -1 + ISTART = J +- TEMP = ABS( S1*A( J, J-1 ) ) +- TEMP2 = ABS( S1*A( J, J )-WR*B( J, J ) ) ++ TEMP = ABS( S1*H( J, J-1 ) ) ++ TEMP2 = ABS( S1*H( J, J )-WR*T( J, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. 
TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( ABS( ( ASCALE*A( J+1, J ) )*TEMP ).LE.( ASCALE*ATOL )* ++ IF( ABS( ( ASCALE*H( J+1, J ) )*TEMP ).LE.( ASCALE*ATOL )* + $ TEMP2 )GO TO 130 + 120 CONTINUE + * +@@ -739,26 +741,26 @@ + * + * Initial Q + * +- TEMP = S1*A( ISTART, ISTART ) - WR*B( ISTART, ISTART ) +- TEMP2 = S1*A( ISTART+1, ISTART ) ++ TEMP = S1*H( ISTART, ISTART ) - WR*T( ISTART, ISTART ) ++ TEMP2 = S1*H( ISTART+1, ISTART ) + CALL SLARTG( TEMP, TEMP2, C, S, TEMPR ) + * + * Sweep + * + DO 190 J = ISTART, ILAST - 1 + IF( J.GT.ISTART ) THEN +- TEMP = A( J, J-1 ) +- CALL SLARTG( TEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = ZERO ++ TEMP = H( J, J-1 ) ++ CALL SLARTG( TEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = ZERO + END IF + * + DO 140 JC = J, ILASTM +- TEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -S*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = TEMP +- TEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, JC ) = -S*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = TEMP2 ++ TEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -S*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = TEMP ++ TEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -S*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = TEMP2 + 140 CONTINUE + IF( ILQ ) THEN + DO 150 JR = 1, N +@@ -768,19 +770,19 @@ + 150 CONTINUE + END IF + * +- TEMP = B( J+1, J+1 ) +- CALL SLARTG( TEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = ZERO ++ TEMP = T( J+1, J+1 ) ++ CALL SLARTG( TEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = ZERO + * + DO 160 JR = IFRSTM, MIN( J+2, ILAST ) +- TEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -S*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = TEMP ++ TEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -S*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = TEMP + 160 CONTINUE + DO 170 JR = IFRSTM, J +- TEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -S*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = TEMP ++ 
TEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -S*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = TEMP + 170 CONTINUE + IF( ILZ ) THEN + DO 180 JR = 1, N +@@ -816,8 +818,8 @@ + * B = ( ) with B11 non-negative. + * ( 0 B22 ) + * +- CALL SLASV2( B( ILAST-1, ILAST-1 ), B( ILAST-1, ILAST ), +- $ B( ILAST, ILAST ), B22, B11, SR, CR, SL, CL ) ++ CALL SLASV2( T( ILAST-1, ILAST-1 ), T( ILAST-1, ILAST ), ++ $ T( ILAST, ILAST ), B22, B11, SR, CR, SL, CL ) + * + IF( B11.LT.ZERO ) THEN + CR = -CR +@@ -826,17 +828,17 @@ + B22 = -B22 + END IF + * +- CALL SROT( ILASTM+1-IFIRST, A( ILAST-1, ILAST-1 ), LDA, +- $ A( ILAST, ILAST-1 ), LDA, CL, SL ) +- CALL SROT( ILAST+1-IFRSTM, A( IFRSTM, ILAST-1 ), 1, +- $ A( IFRSTM, ILAST ), 1, CR, SR ) ++ CALL SROT( ILASTM+1-IFIRST, H( ILAST-1, ILAST-1 ), LDH, ++ $ H( ILAST, ILAST-1 ), LDH, CL, SL ) ++ CALL SROT( ILAST+1-IFRSTM, H( IFRSTM, ILAST-1 ), 1, ++ $ H( IFRSTM, ILAST ), 1, CR, SR ) + * + IF( ILAST.LT.ILASTM ) +- $ CALL SROT( ILASTM-ILAST, B( ILAST-1, ILAST+1 ), LDB, +- $ B( ILAST, ILAST+1 ), LDA, CL, SL ) ++ $ CALL SROT( ILASTM-ILAST, T( ILAST-1, ILAST+1 ), LDT, ++ $ T( ILAST, ILAST+1 ), LDH, CL, SL ) + IF( IFRSTM.LT.ILAST-1 ) +- $ CALL SROT( IFIRST-IFRSTM, B( IFRSTM, ILAST-1 ), 1, +- $ B( IFRSTM, ILAST ), 1, CR, SR ) ++ $ CALL SROT( IFIRST-IFRSTM, T( IFRSTM, ILAST-1 ), 1, ++ $ T( IFRSTM, ILAST ), 1, CR, SR ) + * + IF( ILQ ) + $ CALL SROT( N, Q( 1, ILAST-1 ), 1, Q( 1, ILAST ), 1, CL, +@@ -845,17 +847,17 @@ + $ CALL SROT( N, Z( 1, ILAST-1 ), 1, Z( 1, ILAST ), 1, CR, + $ SR ) + * +- B( ILAST-1, ILAST-1 ) = B11 +- B( ILAST-1, ILAST ) = ZERO +- B( ILAST, ILAST-1 ) = ZERO +- B( ILAST, ILAST ) = B22 ++ T( ILAST-1, ILAST-1 ) = B11 ++ T( ILAST-1, ILAST ) = ZERO ++ T( ILAST, ILAST-1 ) = ZERO ++ T( ILAST, ILAST ) = B22 + * + * If B22 is negative, negate column ILAST + * + IF( B22.LT.ZERO ) THEN + DO 210 J = IFRSTM, ILAST +- A( J, ILAST ) = -A( J, ILAST ) +- B( J, ILAST ) = -B( J, ILAST ) ++ H( J, ILAST ) = -H( J, ILAST ) ++ T( J, ILAST ) = -T( 
J, ILAST ) + 210 CONTINUE + * + IF( ILZ ) THEN +@@ -869,8 +871,8 @@ + * + * Recompute shift + * +- CALL SLAG2( A( ILAST-1, ILAST-1 ), LDA, +- $ B( ILAST-1, ILAST-1 ), LDB, SAFMIN*SAFETY, S1, ++ CALL SLAG2( H( ILAST-1, ILAST-1 ), LDH, ++ $ T( ILAST-1, ILAST-1 ), LDT, SAFMIN*SAFETY, S1, + $ TEMP, WR, TEMP2, WI ) + * + * ------------------- Begin Timing Code ---------------------- +@@ -887,10 +889,10 @@ + * + * Do EISPACK (QZVAL) computation of alpha and beta + * +- A11 = A( ILAST-1, ILAST-1 ) +- A21 = A( ILAST, ILAST-1 ) +- A12 = A( ILAST-1, ILAST ) +- A22 = A( ILAST, ILAST ) ++ A11 = H( ILAST-1, ILAST-1 ) ++ A21 = H( ILAST, ILAST-1 ) ++ A12 = H( ILAST-1, ILAST ) ++ A22 = H( ILAST, ILAST ) + * + * Compute complex Givens rotation on right + * (Assume some element of C = (sA - wB) > unfl ) +@@ -907,10 +909,10 @@ + * + IF( ABS( C11R )+ABS( C11I )+ABS( C12 ).GT.ABS( C21 )+ + $ ABS( C22R )+ABS( C22I ) ) THEN +- T = SLAPY3( C12, C11R, C11I ) +- CZ = C12 / T +- SZR = -C11R / T +- SZI = -C11I / T ++ T1 = SLAPY3( C12, C11R, C11I ) ++ CZ = C12 / T1 ++ SZR = -C11R / T1 ++ SZI = -C11I / T1 + ELSE + CZ = SLAPY2( C22R, C22I ) + IF( CZ.LE.SAFMIN ) THEN +@@ -920,10 +922,10 @@ + ELSE + TEMPR = C22R / CZ + TEMPI = C22I / CZ +- T = SLAPY2( CZ, C21 ) +- CZ = CZ / T +- SZR = -C21*TEMPR / T +- SZI = C21*TEMPI / T ++ T1 = SLAPY2( CZ, C21 ) ++ CZ = CZ / T1 ++ SZR = -C21*TEMPR / T1 ++ SZI = C21*TEMPI / T1 + END IF + END IF + * +@@ -957,10 +959,10 @@ + SQI = TEMPI*A2R - TEMPR*A2I + END IF + END IF +- T = SLAPY3( CQ, SQR, SQI ) +- CQ = CQ / T +- SQR = SQR / T +- SQI = SQI / T ++ T1 = SLAPY3( CQ, SQR, SQI ) ++ CQ = CQ / T1 ++ SQR = SQR / T1 ++ SQI = SQI / T1 + * + * Compute diagonal elements of QBZ + * +@@ -1016,26 +1018,26 @@ + * + * We assume that the block is at least 3x3 + * +- AD11 = ( ASCALE*A( ILAST-1, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD21 = ( ASCALE*A( ILAST, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD12 = ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( 
BSCALE*B( ILAST, ILAST ) ) +- AD22 = ( ASCALE*A( ILAST, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- U12 = B( ILAST-1, ILAST ) / B( ILAST, ILAST ) +- AD11L = ( ASCALE*A( IFIRST, IFIRST ) ) / +- $ ( BSCALE*B( IFIRST, IFIRST ) ) +- AD21L = ( ASCALE*A( IFIRST+1, IFIRST ) ) / +- $ ( BSCALE*B( IFIRST, IFIRST ) ) +- AD12L = ( ASCALE*A( IFIRST, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- AD22L = ( ASCALE*A( IFIRST+1, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- AD32L = ( ASCALE*A( IFIRST+2, IFIRST+1 ) ) / +- $ ( BSCALE*B( IFIRST+1, IFIRST+1 ) ) +- U12L = B( IFIRST, IFIRST+1 ) / B( IFIRST+1, IFIRST+1 ) ++ AD11 = ( ASCALE*H( ILAST-1, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD21 = ( ASCALE*H( ILAST, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD12 = ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD22 = ( ASCALE*H( ILAST, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ U12 = T( ILAST-1, ILAST ) / T( ILAST, ILAST ) ++ AD11L = ( ASCALE*H( IFIRST, IFIRST ) ) / ++ $ ( BSCALE*T( IFIRST, IFIRST ) ) ++ AD21L = ( ASCALE*H( IFIRST+1, IFIRST ) ) / ++ $ ( BSCALE*T( IFIRST, IFIRST ) ) ++ AD12L = ( ASCALE*H( IFIRST, IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ AD22L = ( ASCALE*H( IFIRST+1, IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ AD32L = ( ASCALE*H( IFIRST+2, IFIRST+1 ) ) / ++ $ ( BSCALE*T( IFIRST+1, IFIRST+1 ) ) ++ U12L = T( IFIRST, IFIRST+1 ) / T( IFIRST+1, IFIRST+1 ) + * + V( 1 ) = ( AD11-AD11L )*( AD22-AD11L ) - AD12*AD21 + + $ AD21*U12*AD11L + ( AD12L-AD11L*U12L )*AD21L +@@ -1057,27 +1059,27 @@ + * Zero (j-1)st column of A + * + IF( J.GT.ISTART ) THEN +- V( 1 ) = A( J, J-1 ) +- V( 2 ) = A( J+1, J-1 ) +- V( 3 ) = A( J+2, J-1 ) ++ V( 1 ) = H( J, J-1 ) ++ V( 2 ) = H( J+1, J-1 ) ++ V( 3 ) = H( J+2, J-1 ) + * +- CALL SLARFG( 3, A( J, J-1 ), V( 2 ), 1, TAU ) ++ CALL SLARFG( 3, H( J, J-1 ), V( 2 ), 1, TAU ) + V( 1 ) = ONE +- A( J+1, J-1 ) = ZERO +- A( J+2, J-1 ) = ZERO ++ H( J+1, 
J-1 ) = ZERO ++ H( J+2, J-1 ) = ZERO + END IF + * + DO 230 JC = J, ILASTM +- TEMP = TAU*( A( J, JC )+V( 2 )*A( J+1, JC )+V( 3 )* +- $ A( J+2, JC ) ) +- A( J, JC ) = A( J, JC ) - TEMP +- A( J+1, JC ) = A( J+1, JC ) - TEMP*V( 2 ) +- A( J+2, JC ) = A( J+2, JC ) - TEMP*V( 3 ) +- TEMP2 = TAU*( B( J, JC )+V( 2 )*B( J+1, JC )+V( 3 )* +- $ B( J+2, JC ) ) +- B( J, JC ) = B( J, JC ) - TEMP2 +- B( J+1, JC ) = B( J+1, JC ) - TEMP2*V( 2 ) +- B( J+2, JC ) = B( J+2, JC ) - TEMP2*V( 3 ) ++ TEMP = TAU*( H( J, JC )+V( 2 )*H( J+1, JC )+V( 3 )* ++ $ H( J+2, JC ) ) ++ H( J, JC ) = H( J, JC ) - TEMP ++ H( J+1, JC ) = H( J+1, JC ) - TEMP*V( 2 ) ++ H( J+2, JC ) = H( J+2, JC ) - TEMP*V( 3 ) ++ TEMP2 = TAU*( T( J, JC )+V( 2 )*T( J+1, JC )+V( 3 )* ++ $ T( J+2, JC ) ) ++ T( J, JC ) = T( J, JC ) - TEMP2 ++ T( J+1, JC ) = T( J+1, JC ) - TEMP2*V( 2 ) ++ T( J+2, JC ) = T( J+2, JC ) - TEMP2*V( 3 ) + 230 CONTINUE + IF( ILQ ) THEN + DO 240 JR = 1, N +@@ -1094,27 +1096,27 @@ + * Swap rows to pivot + * + ILPIVT = .FALSE. +- TEMP = MAX( ABS( B( J+1, J+1 ) ), ABS( B( J+1, J+2 ) ) ) +- TEMP2 = MAX( ABS( B( J+2, J+1 ) ), ABS( B( J+2, J+2 ) ) ) ++ TEMP = MAX( ABS( T( J+1, J+1 ) ), ABS( T( J+1, J+2 ) ) ) ++ TEMP2 = MAX( ABS( T( J+2, J+1 ) ), ABS( T( J+2, J+2 ) ) ) + IF( MAX( TEMP, TEMP2 ).LT.SAFMIN ) THEN + SCALE = ZERO + U1 = ONE + U2 = ZERO + GO TO 250 + ELSE IF( TEMP.GE.TEMP2 ) THEN +- W11 = B( J+1, J+1 ) +- W21 = B( J+2, J+1 ) +- W12 = B( J+1, J+2 ) +- W22 = B( J+2, J+2 ) +- U1 = B( J+1, J ) +- U2 = B( J+2, J ) ++ W11 = T( J+1, J+1 ) ++ W21 = T( J+2, J+1 ) ++ W12 = T( J+1, J+2 ) ++ W22 = T( J+2, J+2 ) ++ U1 = T( J+1, J ) ++ U2 = T( J+2, J ) + ELSE +- W21 = B( J+1, J+1 ) +- W11 = B( J+2, J+1 ) +- W22 = B( J+1, J+2 ) +- W12 = B( J+2, J+2 ) +- U2 = B( J+1, J ) +- U1 = B( J+2, J ) ++ W21 = T( J+1, J+1 ) ++ W11 = T( J+2, J+1 ) ++ W22 = T( J+1, J+2 ) ++ W12 = T( J+2, J+2 ) ++ U2 = T( J+1, J ) ++ U1 = T( J+2, J ) + END IF + * + * Swap columns if nec. 
+@@ -1164,9 +1166,9 @@ + * + * Compute Householder Vector + * +- T = SQRT( SCALE**2+U1**2+U2**2 ) +- TAU = ONE + SCALE / T +- VS = -ONE / ( SCALE+T ) ++ T1 = SQRT( SCALE**2+U1**2+U2**2 ) ++ TAU = ONE + SCALE / T1 ++ VS = -ONE / ( SCALE+T1 ) + V( 1 ) = ONE + V( 2 ) = VS*U1 + V( 3 ) = VS*U2 +@@ -1174,18 +1176,18 @@ + * Apply transformations from the right. + * + DO 260 JR = IFRSTM, MIN( J+3, ILAST ) +- TEMP = TAU*( A( JR, J )+V( 2 )*A( JR, J+1 )+V( 3 )* +- $ A( JR, J+2 ) ) +- A( JR, J ) = A( JR, J ) - TEMP +- A( JR, J+1 ) = A( JR, J+1 ) - TEMP*V( 2 ) +- A( JR, J+2 ) = A( JR, J+2 ) - TEMP*V( 3 ) ++ TEMP = TAU*( H( JR, J )+V( 2 )*H( JR, J+1 )+V( 3 )* ++ $ H( JR, J+2 ) ) ++ H( JR, J ) = H( JR, J ) - TEMP ++ H( JR, J+1 ) = H( JR, J+1 ) - TEMP*V( 2 ) ++ H( JR, J+2 ) = H( JR, J+2 ) - TEMP*V( 3 ) + 260 CONTINUE + DO 270 JR = IFRSTM, J + 2 +- TEMP = TAU*( B( JR, J )+V( 2 )*B( JR, J+1 )+V( 3 )* +- $ B( JR, J+2 ) ) +- B( JR, J ) = B( JR, J ) - TEMP +- B( JR, J+1 ) = B( JR, J+1 ) - TEMP*V( 2 ) +- B( JR, J+2 ) = B( JR, J+2 ) - TEMP*V( 3 ) ++ TEMP = TAU*( T( JR, J )+V( 2 )*T( JR, J+1 )+V( 3 )* ++ $ T( JR, J+2 ) ) ++ T( JR, J ) = T( JR, J ) - TEMP ++ T( JR, J+1 ) = T( JR, J+1 ) - TEMP*V( 2 ) ++ T( JR, J+2 ) = T( JR, J+2 ) - TEMP*V( 3 ) + 270 CONTINUE + IF( ILZ ) THEN + DO 280 JR = 1, N +@@ -1196,8 +1198,8 @@ + Z( JR, J+2 ) = Z( JR, J+2 ) - TEMP*V( 3 ) + 280 CONTINUE + END IF +- B( J+1, J ) = ZERO +- B( J+2, J ) = ZERO ++ T( J+1, J ) = ZERO ++ T( J+2, J ) = ZERO + 290 CONTINUE + * + * Last elements: Use Givens rotations +@@ -1205,17 +1207,17 @@ + * Rotations from the left + * + J = ILAST - 1 +- TEMP = A( J, J-1 ) +- CALL SLARTG( TEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = ZERO ++ TEMP = H( J, J-1 ) ++ CALL SLARTG( TEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = ZERO + * + DO 300 JC = J, ILASTM +- TEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -S*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = TEMP +- TEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, 
JC ) = -S*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = TEMP2 ++ TEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -S*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = TEMP ++ TEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -S*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = TEMP2 + 300 CONTINUE + IF( ILQ ) THEN + DO 310 JR = 1, N +@@ -1227,19 +1229,19 @@ + * + * Rotations from the right. + * +- TEMP = B( J+1, J+1 ) +- CALL SLARTG( TEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = ZERO ++ TEMP = T( J+1, J+1 ) ++ CALL SLARTG( TEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = ZERO + * + DO 320 JR = IFRSTM, ILAST +- TEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -S*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = TEMP ++ TEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -S*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = TEMP + 320 CONTINUE + DO 330 JR = IFRSTM, ILAST - 1 +- TEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -S*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = TEMP ++ TEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -S*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = TEMP + 330 CONTINUE + IF( ILZ ) THEN + DO 340 JR = 1, N +@@ -1290,15 +1292,15 @@ + * Set Eigenvalues 1:ILO-1 + * + DO 410 J = 1, ILO - 1 +- IF( B( J, J ).LT.ZERO ) THEN ++ IF( T( J, J ).LT.ZERO ) THEN + IF( ILSCHR ) THEN + DO 390 JR = 1, J +- A( JR, J ) = -A( JR, J ) +- B( JR, J ) = -B( JR, J ) ++ H( JR, J ) = -H( JR, J ) ++ T( JR, J ) = -T( JR, J ) + 390 CONTINUE + ELSE +- A( J, J ) = -A( J, J ) +- B( J, J ) = -B( J, J ) ++ H( J, J ) = -H( J, J ) ++ T( J, J ) = -T( J, J ) + END IF + IF( ILZ ) THEN + DO 400 JR = 1, N +@@ -1306,9 +1308,9 @@ + 400 CONTINUE + END IF + END IF +- ALPHAR( J ) = A( J, J ) ++ ALPHAR( J ) = H( J, J ) + ALPHAI( J ) = ZERO +- BETA( J ) = B( J, J ) ++ BETA( J ) = T( J, J ) + 410 CONTINUE + * + * Normal Termination +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/stgevc.f LAPACK/TIMING/EIG/EIGSRC/stgevc.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/stgevc.f Thu Nov 4 
14:28:30 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/stgevc.f Fri May 25 16:20:41 2001 +@@ -1,18 +1,18 @@ +- SUBROUTINE STGEVC( SIDE, HOWMNY, SELECT, N, A, LDA, B, LDB, VL, ++ SUBROUTINE STGEVC( SIDE, HOWMNY, SELECT, N, S, LDS, P, LDP, VL, + $ LDVL, VR, LDVR, MM, M, WORK, INFO ) + * + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 4, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +- INTEGER INFO, LDA, LDB, LDVL, LDVR, M, MM, N ++ INTEGER INFO, LDP, LDS, LDVL, LDVR, M, MM, N + * .. + * .. Array Arguments .. + LOGICAL SELECT( * ) +- REAL A( LDA, * ), B( LDB, * ), VL( LDVL, * ), ++ REAL P( LDP, * ), S( LDS, * ), VL( LDVL, * ), + $ VR( LDVR, * ), WORK( * ) + * .. + * +@@ -33,35 +33,31 @@ + * Purpose + * ======= + * +-* STGEVC computes some or all of the right and/or left generalized +-* eigenvectors of a pair of real upper triangular matrices (A,B). +-* +-* The right generalized eigenvector x and the left generalized +-* eigenvector y of (A,B) corresponding to a generalized eigenvalue +-* w are defined by: +-* +-* (A - wB) * x = 0 and y**H * (A - wB) = 0 +-* ++* STGEVC computes some or all of the right and/or left eigenvectors of ++* a pair of real matrices (S,P), where S is a quasi-triangular matrix ++* and P is upper triangular. Matrix pairs of this type are produced by ++* the generalized Schur factorization of a matrix pair (A,B): ++* ++* A = Q*S*Z**T, B = Q*P*Z**T ++* ++* as computed by SGGHRD + SHGEQZ. ++* ++* The right eigenvector x and the left eigenvector y of (S,P) ++* corresponding to an eigenvalue w are defined by: ++* ++* S*x = w*P*x, (y**H)*S = w*(y**H)*P, ++* + * where y**H denotes the conjugate tranpose of y. +-* +-* If an eigenvalue w is determined by zero diagonal elements of both A +-* and B, a unit vector is returned as the corresponding eigenvector. 
+-* +-* If all eigenvectors are requested, the routine may either return +-* the matrices X and/or Y of right or left eigenvectors of (A,B), or +-* the products Z*X and/or Q*Y, where Z and Q are input orthogonal +-* matrices. If (A,B) was obtained from the generalized real-Schur +-* factorization of an original pair of matrices +-* (A0,B0) = (Q*A*Z**H,Q*B*Z**H), +-* then Z*X and Q*Y are the matrices of right or left eigenvectors of +-* A. +-* +-* A must be block upper triangular, with 1-by-1 and 2-by-2 diagonal +-* blocks. Corresponding to each 2-by-2 diagonal block is a complex +-* conjugate pair of eigenvalues and eigenvectors; only one +-* eigenvector of the pair is computed, namely the one corresponding +-* to the eigenvalue with positive imaginary part. +-* ++* The eigenvalues are not input to this routine, but are computed ++* directly from the diagonal blocks of S and P. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of (S,P), or the products Z*X and/or Q*Y, ++* where Z and Q are input matrices. ++* If Q and Z are the orthogonal factors from the generalized Schur ++* factorization of a matrix pair (A,B), then Z*X and Q*Y ++* are the matrices of right and left eigenvectors of (A,B). ++* + * Arguments + * ========= + * +@@ -72,78 +68,84 @@ + * + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; +-* = 'B': compute all right and/or left eigenvectors, and +-* backtransform them using the input matrices supplied +-* in VR and/or VL; ++* = 'B': compute all right and/or left eigenvectors, ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, + * specified by the logical array SELECT. + * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY='S', SELECT specifies the eigenvectors to be +-* computed. +-* If HOWMNY='A' or 'B', SELECT is not referenced. 
+-* To select the real eigenvector corresponding to the real +-* eigenvalue w(j), SELECT(j) must be set to .TRUE. To select +-* the complex eigenvector corresponding to a complex conjugate +-* pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must +-* be set to .TRUE.. ++* computed. If w(j) is a real eigenvalue, the corresponding ++* real eigenvector is computed if SELECT(j) is .TRUE.. ++* If w(j) and w(j+1) are the real and imaginary parts of a ++* complex eigenvalue, the corresponding complex eigenvector ++* is computed if either SELECT(j) or SELECT(j+1) is .TRUE., ++* and on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is ++* set to .FALSE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER +-* The order of the matrices A and B. N >= 0. ++* The order of the matrices S and P. N >= 0. + * +-* A (input) REAL array, dimension (LDA,N) +-* The upper quasi-triangular matrix A. ++* S (input) REAL array, dimension (LDS,N) ++* The upper quasi-triangular matrix S from a generalized Schur ++* factorization, as computed by SHGEQZ. ++* ++* LDS (input) INTEGER ++* The leading dimension of array S. LDS >= max(1,N). ++* ++* P (input) REAL array, dimension (LDP,N) ++* The upper triangular matrix P from a generalized Schur ++* factorization, as computed by SHGEQZ. ++* 2-by-2 diagonal blocks of P corresponding to 2-by-2 blocks ++* of S must be in positive diagonal form. + * +-* LDA (input) INTEGER +-* The leading dimension of array A. LDA >= max(1,N). +-* +-* B (input) REAL array, dimension (LDB,N) +-* The upper triangular matrix B. If A has a 2-by-2 diagonal +-* block, then the corresponding 2-by-2 block of B must be +-* diagonal with positive elements. +-* +-* LDB (input) INTEGER +-* The leading dimension of array B. LDB >= max(1,N). ++* LDP (input) INTEGER ++* The leading dimension of array P. LDP >= max(1,N). 
+ * + * VL (input/output) REAL array, dimension (LDVL,MM) + * On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must + * contain an N-by-N matrix Q (usually the orthogonal matrix Q + * of left Schur vectors returned by SHGEQZ). + * On exit, if SIDE = 'L' or 'B', VL contains: +-* if HOWMNY = 'A', the matrix Y of left eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Q*Y; +-* if HOWMNY = 'S', the left eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the left eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VL, in the same order as their eigenvalues. +-* If SIDE = 'R', VL is not referenced. + * + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part, and the second the imaginary part. + * ++* Not referenced if SIDE = 'R'. ++* + * LDVL (input) INTEGER +-* The leading dimension of array VL. +-* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. + * + * VR (input/output) REAL array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +-* contain an N-by-N matrix Q (usually the orthogonal matrix Z ++* contain an N-by-N matrix Z (usually the orthogonal matrix Z + * of right Schur vectors returned by SHGEQZ). ++* + * On exit, if SIDE = 'R' or 'B', VR contains: +-* if HOWMNY = 'A', the matrix X of right eigenvectors of (A,B); +-* if HOWMNY = 'B', the matrix Z*X; +-* if HOWMNY = 'S', the right eigenvectors of (A,B) specified by +-* SELECT, stored consecutively in the columns of +-* VR, in the same order as their eigenvalues. +-* If SIDE = 'L', VR is not referenced. 
++* if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); ++* if HOWMNY = 'B' or 'b', the matrix Z*X; ++* if HOWMNY = 'S' or 's', the right eigenvectors of (S,P) ++* specified by SELECT, stored consecutively in the ++* columns of VR, in the same order as their ++* eigenvalues. + * + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part and the second the imaginary part. ++* ++* Not referenced if SIDE = 'L'. + * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. +-* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +@@ -212,7 +214,7 @@ + * partial sums. Since FORTRAN arrays are stored columnwise, this has + * the advantage that at each step, the elements of C that are accessed + * are adjacent to one another, whereas with the rowwise method, the +-* elements accessed at a step are spaced LDA (and LDB) words apart. ++* elements accessed at a step are spaced LDS (and LDP) words apart. + * + * When finding left eigenvectors, the matrix in question is the + * transpose of the one in storage, so the rowwise method then +@@ -239,8 +241,8 @@ + $ TEMP2R, ULP, XMAX, XSCALE + * .. + * .. Local Arrays .. +- REAL BDIAG( 2 ), SUM( 2, 2 ), SUMA( 2, 2 ), +- $ SUMB( 2, 2 ) ++ REAL BDIAG( 2 ), SUM( 2, 2 ), SUMS( 2, 2 ), ++ $ SUMP( 2, 2 ) + * .. + * .. External Functions .. + LOGICAL LSAME +@@ -265,7 +267,7 @@ + IHWMNY = 2 + ILALL = .FALSE. + ILBACK = .FALSE. +- ELSE IF( LSAME( HOWMNY, 'B' ) .OR. LSAME( HOWMNY, 'T' ) ) THEN ++ ELSE IF( LSAME( HOWMNY, 'B' ) ) THEN + IHWMNY = 3 + ILALL = .TRUE. + ILBACK = .TRUE. 
+@@ -297,9 +299,9 @@ + INFO = -2 + ELSE IF( N.LT.0 ) THEN + INFO = -4 +- ELSE IF( LDA.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDS.LT.MAX( 1, N ) ) THEN + INFO = -6 +- ELSE IF( LDB.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDP.LT.MAX( 1, N ) ) THEN + INFO = -8 + END IF + IF( INFO.NE.0 ) THEN +@@ -318,7 +320,7 @@ + GO TO 10 + END IF + IF( J.LT.N ) THEN +- IF( A( J+1, J ).NE.ZERO ) ++ IF( S( J+1, J ).NE.ZERO ) + $ ILCPLX = .TRUE. + END IF + IF( ILCPLX ) THEN +@@ -338,11 +340,11 @@ + ILABAD = .FALSE. + ILBBAD = .FALSE. + DO 20 J = 1, N - 1 +- IF( A( J+1, J ).NE.ZERO ) THEN +- IF( B( J, J ).EQ.ZERO .OR. B( J+1, J+1 ).EQ.ZERO .OR. +- $ B( J, J+1 ).NE.ZERO )ILBBAD = .TRUE. ++ IF( S( J+1, J ).NE.ZERO ) THEN ++ IF( P( J, J ).EQ.ZERO .OR. P( J+1, J+1 ).EQ.ZERO .OR. ++ $ P( J, J+1 ).NE.ZERO )ILBBAD = .TRUE. + IF( J.LT.N-1 ) THEN +- IF( A( J+2, J+1 ).NE.ZERO ) ++ IF( S( J+2, J+1 ).NE.ZERO ) + $ ILABAD = .TRUE. + END IF + END IF +@@ -385,30 +387,30 @@ + * blocks) of A and B to check for possible overflow in the + * triangular solver. 
+ * +- ANORM = ABS( A( 1, 1 ) ) ++ ANORM = ABS( S( 1, 1 ) ) + IF( N.GT.1 ) +- $ ANORM = ANORM + ABS( A( 2, 1 ) ) +- BNORM = ABS( B( 1, 1 ) ) ++ $ ANORM = ANORM + ABS( S( 2, 1 ) ) ++ BNORM = ABS( P( 1, 1 ) ) + WORK( 1 ) = ZERO + WORK( N+1 ) = ZERO + * + DO 50 J = 2, N + TEMP = ZERO + TEMP2 = ZERO +- IF( A( J, J-1 ).EQ.ZERO ) THEN ++ IF( S( J, J-1 ).EQ.ZERO ) THEN + IEND = J - 1 + ELSE + IEND = J - 2 + END IF + DO 30 I = 1, IEND +- TEMP = TEMP + ABS( A( I, J ) ) +- TEMP2 = TEMP2 + ABS( B( I, J ) ) ++ TEMP = TEMP + ABS( S( I, J ) ) ++ TEMP2 = TEMP2 + ABS( P( I, J ) ) + 30 CONTINUE + WORK( J ) = TEMP + WORK( N+J ) = TEMP2 + DO 40 I = IEND + 1, MIN( J+1, N ) +- TEMP = TEMP + ABS( A( I, J ) ) +- TEMP2 = TEMP2 + ABS( B( I, J ) ) ++ TEMP = TEMP + ABS( S( I, J ) ) ++ TEMP2 = TEMP2 + ABS( P( I, J ) ) + 40 CONTINUE + ANORM = MAX( ANORM, TEMP ) + BNORM = MAX( BNORM, TEMP2 ) +@@ -442,7 +444,7 @@ + END IF + NW = 1 + IF( JE.LT.N ) THEN +- IF( A( JE+1, JE ).NE.ZERO ) THEN ++ IF( S( JE+1, JE ).NE.ZERO ) THEN + ILCPLX = .TRUE. + NW = 2 + END IF +@@ -461,8 +463,8 @@ + * (c) complex eigenvalue. + * + IF( .NOT.ILCPLX ) THEN +- IF( ABS( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( B( JE, JE ) ).LE.SAFMIN ) THEN ++ IF( ABS( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( P( JE, JE ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- returns unit eigenvector + * +@@ -489,10 +491,10 @@ + * + * Real eigenvalue + * +- TEMP = ONE / MAX( ABS( A( JE, JE ) )*ASCALE, +- $ ABS( B( JE, JE ) )*BSCALE, SAFMIN ) +- SALFAR = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*B( JE, JE ) )*BSCALE ++ TEMP = ONE / MAX( ABS( S( JE, JE ) )*ASCALE, ++ $ ABS( P( JE, JE ) )*BSCALE, SAFMIN ) ++ SALFAR = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*P( JE, JE ) )*BSCALE + ACOEF = SBETA*ASCALE + BCOEFR = SALFAR*BSCALE + BCOEFI = ZERO +@@ -534,7 +536,7 @@ + * + * Complex eigenvalue + * +- CALL SLAG2( A( JE, JE ), LDA, B( JE, JE ), LDB, ++ CALL SLAG2( S( JE, JE ), LDS, P( JE, JE ), LDP, + $ SAFMIN*SAFETY, ACOEF, TEMP, BCOEFR, TEMP2, + $ BCOEFI ) + BCOEFI = -BCOEFI +@@ -566,9 +568,9 @@ + * + * Compute first two components of eigenvector + * +- TEMP = ACOEF*A( JE+1, JE ) +- TEMP2R = ACOEF*A( JE, JE ) - BCOEFR*B( JE, JE ) +- TEMP2I = -BCOEFI*B( JE, JE ) ++ TEMP = ACOEF*S( JE+1, JE ) ++ TEMP2R = ACOEF*S( JE, JE ) - BCOEFR*P( JE, JE ) ++ TEMP2I = -BCOEFI*P( JE, JE ) + IF( ABS( TEMP ).GT.ABS( TEMP2R )+ABS( TEMP2I ) ) THEN + WORK( 2*N+JE ) = ONE + WORK( 3*N+JE ) = ZERO +@@ -577,10 +579,10 @@ + ELSE + WORK( 2*N+JE+1 ) = ONE + WORK( 3*N+JE+1 ) = ZERO +- TEMP = ACOEF*A( JE, JE+1 ) +- WORK( 2*N+JE ) = ( BCOEFR*B( JE+1, JE+1 )-ACOEF* +- $ A( JE+1, JE+1 ) ) / TEMP +- WORK( 3*N+JE ) = BCOEFI*B( JE+1, JE+1 ) / TEMP ++ TEMP = ACOEF*S( JE, JE+1 ) ++ WORK( 2*N+JE ) = ( BCOEFR*P( JE+1, JE+1 )-ACOEF* ++ $ S( JE+1, JE+1 ) ) / TEMP ++ WORK( 3*N+JE ) = BCOEFI*P( JE+1, JE+1 ) / TEMP + END IF + XMAX = MAX( ABS( WORK( 2*N+JE ) )+ABS( WORK( 3*N+JE ) ), + $ ABS( WORK( 2*N+JE+1 ) )+ABS( WORK( 3*N+JE+1 ) ) ) +@@ -610,11 +612,11 @@ + END IF + * + NA = 1 +- BDIAG( 1 ) = B( J, J ) ++ BDIAG( 1 ) = P( J, J ) + IF( J.LT.N ) THEN +- IF( A( J+1, J ).NE.ZERO ) THEN ++ IF( S( J+1, J ).NE.ZERO ) THEN + IL2BY2 = .TRUE. 
+- BDIAG( 2 ) = B( J+1, J+1 ) ++ BDIAG( 2 ) = P( J+1, J+1 ) + NA = 2 + * ---------------- Begin Timing Code ---------------- + IN2BY2 = IN2BY2 + 1 +@@ -646,13 +648,13 @@ + * Compute dot products + * + * j-1 +-* SUM = sum conjg( a*A(k,j) - b*B(k,j) )*x(k) ++* SUM = sum conjg( a*S(k,j) - b*P(k,j) )*x(k) + * k=je + * + * To reduce the op count, this is done as + * + * _ j-1 _ j-1 +-* a*conjg( sum A(k,j)*x(k) ) - b*conjg( sum B(k,j)*x(k) ) ++* a*conjg( sum S(k,j)*x(k) ) - b*conjg( sum P(k,j)*x(k) ) + * k=je k=je + * + * which may cause underflow problems if A or B are close +@@ -689,15 +691,15 @@ + *$PL$ CMCHAR='*' + * + DO 110 JA = 1, NA +- SUMA( JA, JW ) = ZERO +- SUMB( JA, JW ) = ZERO ++ SUMS( JA, JW ) = ZERO ++ SUMP( JA, JW ) = ZERO + * + DO 100 JR = JE, J - 1 +- SUMA( JA, JW ) = SUMA( JA, JW ) + +- $ A( JR, J+JA-1 )* ++ SUMS( JA, JW ) = SUMS( JA, JW ) + ++ $ S( JR, J+JA-1 )* + $ WORK( ( JW+1 )*N+JR ) +- SUMB( JA, JW ) = SUMB( JA, JW ) + +- $ B( JR, J+JA-1 )* ++ SUMP( JA, JW ) = SUMP( JA, JW ) + ++ $ P( JR, J+JA-1 )* + $ WORK( ( JW+1 )*N+JR ) + 100 CONTINUE + 110 CONTINUE +@@ -717,15 +719,15 @@ + * + DO 130 JA = 1, NA + IF( ILCPLX ) THEN +- SUM( JA, 1 ) = -ACOEF*SUMA( JA, 1 ) + +- $ BCOEFR*SUMB( JA, 1 ) - +- $ BCOEFI*SUMB( JA, 2 ) +- SUM( JA, 2 ) = -ACOEF*SUMA( JA, 2 ) + +- $ BCOEFR*SUMB( JA, 2 ) + +- $ BCOEFI*SUMB( JA, 1 ) ++ SUM( JA, 1 ) = -ACOEF*SUMS( JA, 1 ) + ++ $ BCOEFR*SUMP( JA, 1 ) - ++ $ BCOEFI*SUMP( JA, 2 ) ++ SUM( JA, 2 ) = -ACOEF*SUMS( JA, 2 ) + ++ $ BCOEFR*SUMP( JA, 2 ) + ++ $ BCOEFI*SUMP( JA, 1 ) + ELSE +- SUM( JA, 1 ) = -ACOEF*SUMA( JA, 1 ) + +- $ BCOEFR*SUMB( JA, 1 ) ++ SUM( JA, 1 ) = -ACOEF*SUMS( JA, 1 ) + ++ $ BCOEFR*SUMP( JA, 1 ) + END IF + 130 CONTINUE + * +@@ -733,7 +735,7 @@ + * Solve ( a A - b B ) y = SUM(,) + * with scaling and perturbation of the denominator + * +- CALL SLALN2( .TRUE., NA, NW, DMIN, ACOEF, A( J, J ), LDA, ++ CALL SLALN2( .TRUE., NA, NW, DMIN, ACOEF, S( J, J ), LDS, + $ BDIAG( 1 ), BDIAG( 2 ), SUM, 2, BCOEFR, + $ BCOEFI, 
WORK( 2*N+J ), N, SCALE, TEMP, + $ IINFO ) +@@ -859,7 +861,7 @@ + END IF + NW = 1 + IF( JE.GT.1 ) THEN +- IF( A( JE, JE-1 ).NE.ZERO ) THEN ++ IF( S( JE, JE-1 ).NE.ZERO ) THEN + ILCPLX = .TRUE. + NW = 2 + END IF +@@ -878,8 +880,8 @@ + * (c) complex eigenvalue. + * + IF( .NOT.ILCPLX ) THEN +- IF( ABS( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( B( JE, JE ) ).LE.SAFMIN ) THEN ++ IF( ABS( S( JE, JE ) ).LE.SAFMIN .AND. ++ $ ABS( P( JE, JE ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- returns unit eigenvector + * +@@ -908,10 +910,10 @@ + * + * Real eigenvalue + * +- TEMP = ONE / MAX( ABS( A( JE, JE ) )*ASCALE, +- $ ABS( B( JE, JE ) )*BSCALE, SAFMIN ) +- SALFAR = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*B( JE, JE ) )*BSCALE ++ TEMP = ONE / MAX( ABS( S( JE, JE ) )*ASCALE, ++ $ ABS( P( JE, JE ) )*BSCALE, SAFMIN ) ++ SALFAR = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*P( JE, JE ) )*BSCALE + ACOEF = SBETA*ASCALE + BCOEFR = SALFAR*BSCALE + BCOEFI = ZERO +@@ -954,14 +956,14 @@ + * (See "Further Details", above.) 
+ * + DO 260 JR = 1, JE - 1 +- WORK( 2*N+JR ) = BCOEFR*B( JR, JE ) - +- $ ACOEF*A( JR, JE ) ++ WORK( 2*N+JR ) = BCOEFR*P( JR, JE ) - ++ $ ACOEF*S( JR, JE ) + 260 CONTINUE + ELSE + * + * Complex eigenvalue + * +- CALL SLAG2( A( JE-1, JE-1 ), LDA, B( JE-1, JE-1 ), LDB, ++ CALL SLAG2( S( JE-1, JE-1 ), LDS, P( JE-1, JE-1 ), LDP, + $ SAFMIN*SAFETY, ACOEF, TEMP, BCOEFR, TEMP2, + $ BCOEFI ) + IF( BCOEFI.EQ.ZERO ) THEN +@@ -993,9 +995,9 @@ + * Compute first two components of eigenvector + * and contribution to sums + * +- TEMP = ACOEF*A( JE, JE-1 ) +- TEMP2R = ACOEF*A( JE, JE ) - BCOEFR*B( JE, JE ) +- TEMP2I = -BCOEFI*B( JE, JE ) ++ TEMP = ACOEF*S( JE, JE-1 ) ++ TEMP2R = ACOEF*S( JE, JE ) - BCOEFR*P( JE, JE ) ++ TEMP2I = -BCOEFI*P( JE, JE ) + IF( ABS( TEMP ).GE.ABS( TEMP2R )+ABS( TEMP2I ) ) THEN + WORK( 2*N+JE ) = ONE + WORK( 3*N+JE ) = ZERO +@@ -1004,10 +1006,10 @@ + ELSE + WORK( 2*N+JE-1 ) = ONE + WORK( 3*N+JE-1 ) = ZERO +- TEMP = ACOEF*A( JE-1, JE ) +- WORK( 2*N+JE ) = ( BCOEFR*B( JE-1, JE-1 )-ACOEF* +- $ A( JE-1, JE-1 ) ) / TEMP +- WORK( 3*N+JE ) = BCOEFI*B( JE-1, JE-1 ) / TEMP ++ TEMP = ACOEF*S( JE-1, JE ) ++ WORK( 2*N+JE ) = ( BCOEFR*P( JE-1, JE-1 )-ACOEF* ++ $ S( JE-1, JE-1 ) ) / TEMP ++ WORK( 3*N+JE ) = BCOEFI*P( JE-1, JE-1 ) / TEMP + END IF + * + XMAX = MAX( ABS( WORK( 2*N+JE ) )+ABS( WORK( 3*N+JE ) ), +@@ -1027,12 +1029,12 @@ + CRE2B = BCOEFR*WORK( 2*N+JE ) - BCOEFI*WORK( 3*N+JE ) + CIM2B = BCOEFI*WORK( 2*N+JE ) + BCOEFR*WORK( 3*N+JE ) + DO 270 JR = 1, JE - 2 +- WORK( 2*N+JR ) = -CREALA*A( JR, JE-1 ) + +- $ CREALB*B( JR, JE-1 ) - +- $ CRE2A*A( JR, JE ) + CRE2B*B( JR, JE ) +- WORK( 3*N+JR ) = -CIMAGA*A( JR, JE-1 ) + +- $ CIMAGB*B( JR, JE-1 ) - +- $ CIM2A*A( JR, JE ) + CIM2B*B( JR, JE ) ++ WORK( 2*N+JR ) = -CREALA*S( JR, JE-1 ) + ++ $ CREALB*P( JR, JE-1 ) - ++ $ CRE2A*S( JR, JE ) + CRE2B*P( JR, JE ) ++ WORK( 3*N+JR ) = -CIMAGA*S( JR, JE-1 ) + ++ $ CIMAGB*P( JR, JE-1 ) - ++ $ CIM2A*S( JR, JE ) + CIM2B*P( JR, JE ) + 270 CONTINUE + END IF + * +@@ -1054,7 +1056,7 @@ + 
* next iteration to process it (when it will be j:j+1) + * + IF( .NOT.IL2BY2 .AND. J.GT.1 ) THEN +- IF( A( J, J-1 ).NE.ZERO ) THEN ++ IF( S( J, J-1 ).NE.ZERO ) THEN + IL2BY2 = .TRUE. + * -------------- Begin Timing Code ----------------- + IN2BY2 = IN2BY2 + 1 +@@ -1062,18 +1064,18 @@ + GO TO 370 + END IF + END IF +- BDIAG( 1 ) = B( J, J ) ++ BDIAG( 1 ) = P( J, J ) + IF( IL2BY2 ) THEN + NA = 2 +- BDIAG( 2 ) = B( J+1, J+1 ) ++ BDIAG( 2 ) = P( J+1, J+1 ) + ELSE + NA = 1 + END IF + * + * Compute x(j) (and x(j+1), if 2-by-2 block) + * +- CALL SLALN2( .FALSE., NA, NW, DMIN, ACOEF, A( J, J ), +- $ LDA, BDIAG( 1 ), BDIAG( 2 ), WORK( 2*N+J ), ++ CALL SLALN2( .FALSE., NA, NW, DMIN, ACOEF, S( J, J ), ++ $ LDS, BDIAG( 1 ), BDIAG( 2 ), WORK( 2*N+J ), + $ N, BCOEFR, BCOEFI, SUM, 2, SCALE, TEMP, + $ IINFO ) + IF( SCALE.LT.ONE ) THEN +@@ -1096,7 +1098,7 @@ + 300 CONTINUE + 310 CONTINUE + * +-* w = w + x(j)*(a A(*,j) - b B(*,j) ) with scaling ++* w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling + * + IF( J.GT.1 ) THEN + * +@@ -1137,19 +1139,19 @@ + $ BCOEFR*WORK( 3*N+J+JA-1 ) + DO 340 JR = 1, J - 1 + WORK( 2*N+JR ) = WORK( 2*N+JR ) - +- $ CREALA*A( JR, J+JA-1 ) + +- $ CREALB*B( JR, J+JA-1 ) ++ $ CREALA*S( JR, J+JA-1 ) + ++ $ CREALB*P( JR, J+JA-1 ) + WORK( 3*N+JR ) = WORK( 3*N+JR ) - +- $ CIMAGA*A( JR, J+JA-1 ) + +- $ CIMAGB*B( JR, J+JA-1 ) ++ $ CIMAGA*S( JR, J+JA-1 ) + ++ $ CIMAGB*P( JR, J+JA-1 ) + 340 CONTINUE + ELSE + CREALA = ACOEF*WORK( 2*N+J+JA-1 ) + CREALB = BCOEFR*WORK( 2*N+J+JA-1 ) + DO 350 JR = 1, J - 1 + WORK( 2*N+JR ) = WORK( 2*N+JR ) - +- $ CREALA*A( JR, J+JA-1 ) + +- $ CREALB*B( JR, J+JA-1 ) ++ $ CREALA*S( JR, J+JA-1 ) + ++ $ CREALB*P( JR, J+JA-1 ) + 350 CONTINUE + END IF + 360 CONTINUE +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/strevc.f LAPACK/TIMING/EIG/EIGSRC/strevc.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/strevc.f Thu Nov 4 14:28:33 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/strevc.f Fri May 25 16:20:57 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count 
operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 7, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +@@ -30,28 +30,23 @@ + * + * STREVC computes some or all of the right and/or left eigenvectors of + * a real upper quasi-triangular matrix T. +-* ++* Matrices of this type are produced by the Schur factorization of ++* a real general matrix: A = Q*T*Q**T, as computed by SHSEQR. ++* + * The right eigenvector x and the left eigenvector y of T corresponding + * to an eigenvalue w are defined by: +-* +-* T*x = w*x, y'*T = w*y' +-* +-* where y' denotes the conjugate transpose of the vector y. +-* +-* If all eigenvectors are requested, the routine may either return the +-* matrices X and/or Y of right or left eigenvectors of T, or the +-* products Q*X and/or Q*Y, where Q is an input orthogonal +-* matrix. If T was obtained from the real-Schur factorization of an +-* original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of +-* right or left eigenvectors of A. +-* +-* T must be in Schur canonical form (as returned by SHSEQR), that is, +-* block upper triangular with 1-by-1 and 2-by-2 diagonal blocks; each +-* 2-by-2 diagonal block has its diagonal elements equal and its +-* off-diagonal elements of opposite sign. Corresponding to each 2-by-2 +-* diagonal block is a complex conjugate pair of eigenvalues and +-* eigenvectors; only one eigenvector of the pair is computed, namely +-* the one corresponding to the eigenvalue with positive imaginary part. ++* ++* T*x = w*x, (y**H)*T = w*(y**H) ++* ++* where y**H denotes the conjugate transpose of y. ++* The eigenvalues are not input to this routine, but are read directly ++* from the diagonal blocks of T. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an ++* input matrix. 
If Q is the orthogonal factor that reduces a matrix ++* A to Schur form T, then Q*X and Q*Y are the matrices of right and ++* left eigenvectors of A. + * + * Arguments + * ========= +@@ -64,21 +59,21 @@ + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; + * = 'B': compute all right and/or left eigenvectors, +-* and backtransform them using the input matrices +-* supplied in VR and/or VL; ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, +-* specified by the logical array SELECT. ++* as indicated by the logical array SELECT. + * + * SELECT (input/output) LOGICAL array, dimension (N) + * If HOWMNY = 'S', SELECT specifies the eigenvectors to be + * computed. +-* If HOWMNY = 'A' or 'B', SELECT is not referenced. +-* To select the real eigenvector corresponding to a real +-* eigenvalue w(j), SELECT(j) must be set to .TRUE.. To select +-* the complex eigenvector corresponding to a complex conjugate +-* pair w(j) and w(j+1), either SELECT(j) or SELECT(j+1) must be +-* set to .TRUE.; then on exit SELECT(j) is .TRUE. and +-* SELECT(j+1) is .FALSE.. ++* If w(j) is a real eigenvalue, the corresponding real ++* eigenvector is computed if SELECT(j) is .TRUE.. ++* If w(j) and w(j+1) are the real and imaginary parts of a ++* complex eigenvalue, the corresponding complex eigenvector is ++* computed if either SELECT(j) or SELECT(j+1) is .TRUE., and ++* on exit SELECT(j) is set to .TRUE. and SELECT(j+1) is set to ++* .FALSE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER + * The order of the matrix T. N >= 0. +@@ -95,15 +90,6 @@ + * of Schur vectors returned by SHSEQR). + * On exit, if SIDE = 'L' or 'B', VL contains: + * if HOWMNY = 'A', the matrix Y of left eigenvectors of T; +-* VL has the same quasi-lower triangular form +-* as T'. If T(i,i) is a real eigenvalue, then +-* the i-th column VL(i) of VL is its +-* corresponding eigenvector. 
If T(i:i+1,i:i+1) +-* is a 2-by-2 block whose eigenvalues are +-* complex-conjugate eigenvalues of T, then +-* VL(i)+sqrt(-1)*VL(i+1) is the complex +-* eigenvector corresponding to the eigenvalue +-* with positive real part. + * if HOWMNY = 'B', the matrix Q*Y; + * if HOWMNY = 'S', the left eigenvectors of T specified by + * SELECT, stored consecutively in the columns +@@ -112,11 +98,11 @@ + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part, and the second the imaginary part. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of the array VL. LDVL >= max(1,N) if +-* SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of the array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. + * + * VR (input/output) REAL array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +@@ -124,15 +110,6 @@ + * of Schur vectors returned by SHSEQR). + * On exit, if SIDE = 'R' or 'B', VR contains: + * if HOWMNY = 'A', the matrix X of right eigenvectors of T; +-* VR has the same quasi-upper triangular form +-* as T. If T(i,i) is a real eigenvalue, then +-* the i-th column VR(i) of VR is its +-* corresponding eigenvector. If T(i:i+1,i:i+1) +-* is a 2-by-2 block whose eigenvalues are +-* complex-conjugate eigenvalues of T, then +-* VR(i)+sqrt(-1)*VR(i+1) is the complex +-* eigenvector corresponding to the eigenvalue +-* with positive real part. + * if HOWMNY = 'B', the matrix Q*X; + * if HOWMNY = 'S', the right eigenvectors of T specified by + * SELECT, stored consecutively in the columns +@@ -141,11 +118,11 @@ + * A complex eigenvector corresponding to a complex eigenvalue + * is stored in two consecutive columns, the first holding the + * real part and the second the imaginary part. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. 
+ * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. LDVR >= max(1,N) if +-* SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/zbdsqr.f LAPACK/TIMING/EIG/EIGSRC/zbdsqr.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/zbdsqr.f Thu Nov 4 14:28:30 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/zbdsqr.f Fri May 25 16:20:01 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + CHARACTER UPLO +@@ -26,14 +26,26 @@ + * Purpose + * ======= + * +-* ZBDSQR computes the singular value decomposition (SVD) of a real +-* N-by-N (upper or lower) bidiagonal matrix B: B = Q * S * P' (P' +-* denotes the transpose of P), where S is a diagonal matrix with +-* non-negative diagonal elements (the singular values of B), and Q +-* and P are orthogonal matrices. ++* ZBDSQR computes the singular values and, optionally, the right and/or ++* left singular vectors from the singular value decomposition (SVD) of ++* a real N-by-N (upper or lower) bidiagonal matrix B using the implicit ++* zero-shift QR algorithm. The SVD of B has the form ++* ++* B = Q * S * P**H ++* ++* where S is the diagonal matrix of singular values, Q is an orthogonal ++* matrix of left singular vectors, and P is an orthogonal matrix of ++* right singular vectors. If left singular vectors are requested, this ++* subroutine actually returns U*Q instead of Q, and, if right singular ++* vectors are requested, this subroutine returns P**H*VT instead of ++* P**H, for given complex input matrices U and VT. 
When U and VT are ++* the unitary matrices that reduce a general matrix A to bidiagonal ++* form: A = U*B*VT, as computed by ZGEBRD, then + * +-* The routine computes S, and optionally computes U * Q, P' * VT, +-* or Q' * C, for given complex input matrices U, VT, and C. ++* A = (U*Q) * S * (P**H*VT) ++* ++* is the SVD of A. Optionally, the subroutine may also compute Q**H*C ++* for a given complex input matrix C. + * + * See "Computing Small Singular Values of Bidiagonal Matrices With + * Guaranteed High Relative Accuracy," by J. Demmel and W. Kahan, +@@ -69,18 +81,17 @@ + * order. + * + * E (input/output) DOUBLE PRECISION array, dimension (N) +-* On entry, the elements of E contain the +-* offdiagonal elements of of the bidiagonal matrix whose SVD +-* is desired. On normal exit (INFO = 0), E is destroyed. +-* If the algorithm does not converge (INFO > 0), D and E ++* On entry, the N-1 offdiagonal elements of the bidiagonal ++* matrix B. ++* On exit, if INFO = 0, E is destroyed; if INFO > 0, D and E + * will contain the diagonal and superdiagonal elements of a + * bidiagonal matrix orthogonally equivalent to the one given + * as input. E(N) is used for workspace. + * + * VT (input/output) COMPLEX*16 array, dimension (LDVT, NCVT) + * On entry, an N-by-NCVT matrix VT. +-* On exit, VT is overwritten by P' * VT. +-* VT is not referenced if NCVT = 0. ++* On exit, VT is overwritten by P**H * VT. ++* Not referenced if NCVT = 0. + * + * LDVT (input) INTEGER + * The leading dimension of the array VT. +@@ -89,21 +100,22 @@ + * U (input/output) COMPLEX*16 array, dimension (LDU, N) + * On entry, an NRU-by-N matrix U. + * On exit, U is overwritten by U * Q. +-* U is not referenced if NRU = 0. ++* Not referenced if NRU = 0. + * + * LDU (input) INTEGER + * The leading dimension of the array U. LDU >= max(1,NRU). + * + * C (input/output) COMPLEX*16 array, dimension (LDC, NCC) + * On entry, an N-by-NCC matrix C. +-* On exit, C is overwritten by Q' * C. 
+-* C is not referenced if NCC = 0. ++* On exit, C is overwritten by Q**H * C. ++* Not referenced if NCC = 0. + * + * LDC (input) INTEGER + * The leading dimension of the array C. + * LDC >= max(1,N) if NCC > 0; LDC >=1 if NCC = 0. + * +-* RWORK (workspace) DOUBLE PRECISION array, dimension (4*N) ++* RWORK (workspace) DOUBLE PRECISION array, dimension (2*N) ++* if NCVT = NRU = NCC = 0, (max(1, 4*N-4)) otherwise + * + * INFO (output) INTEGER + * = 0: successful exit +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/zgghrd.f LAPACK/TIMING/EIG/EIGSRC/zgghrd.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/zgghrd.f Thu Nov 4 14:28:32 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/zgghrd.f Fri May 25 16:20:24 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* September 30, 1994 ++* April 26, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ +@@ -33,16 +33,29 @@ + * + * ZGGHRD reduces a pair of complex matrices (A,B) to generalized upper + * Hessenberg form using unitary transformations, where A is a +-* general matrix and B is upper triangular: Q' * A * Z = H and +-* Q' * B * Z = T, where H is upper Hessenberg, T is upper triangular, +-* and Q and Z are unitary, and ' means conjugate transpose. ++* general matrix and B is upper triangular. The form of the ++* generalized eigenvalue problem is ++* A*x = lambda*B*x, ++* and B is typically made upper triangular by computing its QR ++* factorization and moving the unitary matrix Q to the left side ++* of the equation. ++* ++* This subroutine simultaneously reduces A to a Hessenberg matrix H: ++* Q**H*A*Z = H ++* and transforms B to another upper triangular matrix T: ++* Q**H*B*Z = T ++* in order to reduce the problem to its standard form ++* H*y = lambda*T*y ++* where y = Z**H*x. + * + * The unitary matrices Q and Z are determined as products of Givens + * rotations. 
They may either be formed explicitly, or they may be + * postmultiplied into input matrices Q1 and Z1, so that +-* +-* Q1 * A * Z1' = (Q1*Q) * H * (Z1*Z)' +-* Q1 * B * Z1' = (Q1*Q) * T * (Z1*Z)' ++* Q1 * A * Z1**H = (Q1*Q) * H * (Z1*Z)**H ++* Q1 * B * Z1**H = (Q1*Q) * T * (Z1*Z)**H ++* If Q1 is the unitary matrix from the QR factorization of B in the ++* original equation A*x = lambda*B*x, then ZGGHRD reduces the original ++* problem to generalized Hessenberg form. + * + * Arguments + * ========= +@@ -66,10 +79,11 @@ + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. ILO and IHI are normally set +-* by a previous call to ZGGBAL; otherwise they should be set +-* to 1 and N respectively. ++* ILO and IHI mark the rows and columns of A which are to be ++* reduced. It is assumed that A is already upper triangular ++* in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are ++* normally set by a previous call to ZGGBAL; otherwise they ++* should be set to 1 and N respectively. + * 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. + * + * A (input/output) COMPLEX*16 array, dimension (LDA, N) +@@ -83,33 +97,28 @@ + * + * B (input/output) COMPLEX*16 array, dimension (LDB, N) + * On entry, the N-by-N upper triangular matrix B. +-* On exit, the upper triangular matrix T = Q' B Z. The ++* On exit, the upper triangular matrix T = Q**H B Z. The + * elements below the diagonal are set to zero. + * + * LDB (input) INTEGER + * The leading dimension of the array B. LDB >= max(1,N). + * + * Q (input/output) COMPLEX*16 array, dimension (LDQ, N) +-* If COMPQ='N': Q is not referenced. +-* If COMPQ='I': on entry, Q need not be set, and on exit it +-* contains the unitary matrix Q, where Q' +-* is the product of the Givens transformations +-* which are applied to A and B on the left. 
+-* If COMPQ='V': on entry, Q must contain a unitary matrix +-* Q1, and on exit this is overwritten by Q1*Q. ++* On entry, if COMPQ = 'V', the unitary matrix Q1, typically ++* from the QR factorization of B. ++* On exit, if COMPQ='I', the unitary matrix Q, and if ++* COMPQ = 'V', the product Q1*Q. ++* Not referenced if COMPQ='N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. + * LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. + * + * Z (input/output) COMPLEX*16 array, dimension (LDZ, N) +-* If COMPZ='N': Z is not referenced. +-* If COMPZ='I': on entry, Z need not be set, and on exit it +-* contains the unitary matrix Z, which is +-* the product of the Givens transformations +-* which are applied to A and B on the right. +-* If COMPZ='V': on entry, Z must contain a unitary matrix +-* Z1, and on exit this is overwritten by Z1*Z. ++* On entry, if COMPZ = 'V', the unitary matrix Z1. ++* On exit, if COMPZ='I', the unitary matrix Z, and if ++* COMPZ = 'V', the product Z1*Z. ++* Not referenced if COMPZ='N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/zhgeqz.f LAPACK/TIMING/EIG/EIGSRC/zhgeqz.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/zhgeqz.f Thu Nov 4 14:28:33 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/zhgeqz.f Fri May 25 16:20:38 2001 +@@ -1,20 +1,21 @@ +- SUBROUTINE ZHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, A, LDA, B, LDB, ++ SUBROUTINE ZHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, H, LDH, T, LDT, + $ ALPHA, BETA, Q, LDQ, Z, LDZ, WORK, LWORK, + $ RWORK, INFO ) + * + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 3, 2001 + * + * .. Scalar Arguments .. + CHARACTER COMPQ, COMPZ, JOB +- INTEGER IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, LWORK, N ++ INTEGER IHI, ILO, INFO, LDH, LDQ, LDT, LDZ, LWORK, N + * .. + * .. Array Arguments .. 
+ DOUBLE PRECISION RWORK( * ) +- COMPLEX*16 A( LDA, * ), ALPHA( * ), B( LDB, * ), +- $ BETA( * ), Q( LDQ, * ), WORK( * ), Z( LDZ, * ) ++ COMPLEX*16 ALPHA( * ), BETA( * ), H( LDH, * ), ++ $ Q( LDQ, * ), T( LDT, * ), WORK( * ), ++ $ Z( LDZ, * ) + * .. + * + * ----------------------- Begin Timing Code ------------------------ +@@ -34,24 +35,44 @@ + * Purpose + * ======= + * +-* ZHGEQZ implements a single-shift version of the QZ +-* method for finding the generalized eigenvalues w(i)=ALPHA(i)/BETA(i) +-* of the equation +-* +-* det( A - w(i) B ) = 0 +-* +-* If JOB='S', then the pair (A,B) is simultaneously +-* reduced to Schur form (i.e., A and B are both upper triangular) by +-* applying one unitary tranformation (usually called Q) on the left and +-* another (usually called Z) on the right. The diagonal elements of +-* A are then ALPHA(1),...,ALPHA(N), and of B are BETA(1),...,BETA(N). +-* +-* If JOB='S' and COMPQ and COMPZ are 'V' or 'I', then the unitary +-* transformations used to reduce (A,B) are accumulated into the arrays +-* Q and Z s.t.: +-* +-* Q(in) A(in) Z(in)* = Q(out) A(out) Z(out)* +-* Q(in) B(in) Z(in)* = Q(out) B(out) Z(out)* ++* ZHGEQZ computes the eigenvalues of a complex matrix pair (H,T), ++* where H is an upper Hessenberg matrix and T is upper triangular, ++* using the single-shift QZ method. ++* Matrix pairs of this type are produced by the reduction to ++* generalized upper Hessenberg form of a complex matrix pair (A,B): ++* ++* A = Q1*H*Z1**H, B = Q1*T*Z1**H, ++* ++* as computed by ZGGHRD. ++* ++* If JOB='S', then the Hessenberg-triangular pair (H,T) is ++* also reduced to generalized Schur form, ++* ++* H = Q*S*Z**H, T = Q*P*Z**H, ++* ++* where Q and Z are unitary matrices and S and P are upper triangular. ++* ++* Optionally, the unitary matrix Q from the generalized Schur ++* factorization may be postmultiplied into an input matrix Q1, and the ++* unitary matrix Z may be postmultiplied into an input matrix Z1. 
++* If Q1 and Z1 are the unitary matrices from ZGGHRD that reduced ++* the matrix pair (A,B) to generalized Hessenberg form, then the output ++* matrices Q1*Q and Z1*Z are the unitary factors from the generalized ++* Schur factorization of (A,B): ++* ++* A = (Q1*Q)*S*(Z1*Z)**H, B = (Q1*Q)*P*(Z1*Z)**H. ++* ++* To avoid overflow, eigenvalues of the matrix pair (H,T) ++* (equivalently, of (A,B)) are computed as a pair of complex values ++* (alpha,beta). If beta is nonzero, lambda = alpha / beta is an ++* eigenvalue of the generalized nonsymmetric eigenvalue problem (GNEP) ++* A*x = lambda*B*x ++* and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the ++* alternate form of the GNEP ++* mu*A*y = B*y. ++* The values of alpha and beta for the i-th eigenvalue can be read ++* directly from the generalized Schur form: alpha = S(i,i), ++* beta = P(i,i). + * + * Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix + * Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), +@@ -61,83 +82,88 @@ + * ========= + * + * JOB (input) CHARACTER*1 +-* = 'E': compute only ALPHA and BETA. A and B will not +-* necessarily be put into generalized Schur form. +-* = 'S': put A and B into generalized Schur form, as well +-* as computing ALPHA and BETA. ++* = 'E': Compute eigenvalues only; ++* = 'S': Computer eigenvalues and the Schur form. + * + * COMPQ (input) CHARACTER*1 +-* = 'N': do not modify Q. +-* = 'V': multiply the array Q on the right by the conjugate +-* transpose of the unitary tranformation that is +-* applied to the left side of A and B to reduce them +-* to Schur form. +-* = 'I': like COMPQ='V', except that Q will be initialized to +-* the identity first. ++* = 'N': Left Schur vectors (Q) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Q ++* of left Schur vectors of (H,T) is returned; ++* = 'V': Q must contain a unitary matrix Q1 on entry and ++* the product Q1*Q is returned. 
+ * + * COMPZ (input) CHARACTER*1 +-* = 'N': do not modify Z. +-* = 'V': multiply the array Z on the right by the unitary +-* tranformation that is applied to the right side of +-* A and B to reduce them to Schur form. +-* = 'I': like COMPZ='V', except that Z will be initialized to +-* the identity first. ++* = 'N': Right Schur vectors (Z) are not computed; ++* = 'I': Q is initialized to the unit matrix and the matrix Z ++* of right Schur vectors of (H,T) is returned; ++* = 'V': Z must contain a unitary matrix Z1 on entry and ++* the product Z1*Z is returned. + * + * N (input) INTEGER +-* The order of the matrices A, B, Q, and Z. N >= 0. ++* The order of the matrices H, T, Q, and Z. N >= 0. + * + * ILO (input) INTEGER + * IHI (input) INTEGER +-* It is assumed that A is already upper triangular in rows and +-* columns 1:ILO-1 and IHI+1:N. +-* 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. +-* +-* A (input/output) COMPLEX*16 array, dimension (LDA, N) +-* On entry, the N-by-N upper Hessenberg matrix A. Elements +-* below the subdiagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to upper triangular form. +-* If JOB='E', then on exit A will have been destroyed. +-* +-* LDA (input) INTEGER +-* The leading dimension of the array A. LDA >= max( 1, N ). +-* +-* B (input/output) COMPLEX*16 array, dimension (LDB, N) +-* On entry, the N-by-N upper triangular matrix B. Elements +-* below the diagonal must be zero. +-* If JOB='S', then on exit A and B will have been +-* simultaneously reduced to upper triangular form. +-* If JOB='E', then on exit B will have been destroyed. ++* ILO and IHI mark the rows and columns of H which are in ++* Hessenberg form. It is assumed that A is already upper ++* triangular in rows and columns 1:ILO-1 and IHI+1:N. ++* If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. ++* ++* H (input/output) COMPLEX*16 array, dimension (LDH, N) ++* On entry, the N-by-N upper Hessenberg matrix H. 
++* On exit, if JOB = 'S', H contains the upper triangular ++* matrix S from the generalized Schur factorization. ++* If JOB = 'E', the diagonal of H matches that of S, but ++* the rest of H is unspecified. ++* ++* LDH (input) INTEGER ++* The leading dimension of the array H. LDH >= max( 1, N ). ++* ++* T (input/output) COMPLEX*16 array, dimension (LDT, N) ++* On entry, the N-by-N upper triangular matrix T. ++* On exit, if JOB = 'S', T contains the upper triangular ++* matrix P from the generalized Schur factorization. ++* If JOB = 'E', the diagonal of T matches that of P, but ++* the rest of T is unspecified. + * +-* LDB (input) INTEGER +-* The leading dimension of the array B. LDB >= max( 1, N ). ++* LDT (input) INTEGER ++* The leading dimension of the array T. LDT >= max( 1, N ). + * + * ALPHA (output) COMPLEX*16 array, dimension (N) +-* The diagonal elements of A when the pair (A,B) has been +-* reduced to Schur form. ALPHA(i)/BETA(i) i=1,...,N +-* are the generalized eigenvalues. ++* The complex scalars alpha that define the eigenvalues of ++* GNEP. ALPHA(i) = S(i,i) in the generalized Schur ++* factorization. + * + * BETA (output) COMPLEX*16 array, dimension (N) +-* The diagonal elements of B when the pair (A,B) has been +-* reduced to Schur form. ALPHA(i)/BETA(i) i=1,...,N +-* are the generalized eigenvalues. A and B are normalized +-* so that BETA(1),...,BETA(N) are non-negative real numbers. ++* The real non-negative scalars beta that define the ++* eigenvalues of GNEP. BETA(i) = P(i,i) in the generalized ++* Schur factorization. ++* ++* Together, the quantities alpha = ALPHA(j) and beta = BETA(j) ++* represent the j-th eigenvalue of the matrix pair (A,B), in ++* one of the forms lambda = alpha/beta or mu = beta/alpha. ++* Since either lambda or mu may overflow, they should not, ++* in general, be computed. + * + * Q (input/output) COMPLEX*16 array, dimension (LDQ, N) +-* If COMPQ='N', then Q will not be referenced. 
+-* If COMPQ='V' or 'I', then the conjugate transpose of the +-* unitary transformations which are applied to A and B on +-* the left will be applied to the array Q on the right. ++* On entry, if COMPZ = 'V', the unitary matrix Q1 used in the ++* reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the unitary matrix of left Schur ++* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of ++* left Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDQ (input) INTEGER + * The leading dimension of the array Q. LDQ >= 1. + * If COMPQ='V' or 'I', then LDQ >= N. + * + * Z (input/output) COMPLEX*16 array, dimension (LDZ, N) +-* If COMPZ='N', then Z will not be referenced. +-* If COMPZ='V' or 'I', then the unitary transformations which +-* are applied to A and B on the right will be applied to the +-* array Z on the right. ++* On entry, if COMPZ = 'V', the unitary matrix Z1 used in the ++* reduction of (A,B) to generalized Hessenberg form. ++* On exit, if COMPZ = 'I', the unitary matrix of right Schur ++* vectors of (H,T), and if COMPZ = 'V', the unitary matrix of ++* right Schur vectors of (A,B). ++* Not referenced if COMPZ = 'N'. + * + * LDZ (input) INTEGER + * The leading dimension of the array Z. LDZ >= 1. +@@ -159,13 +185,12 @@ + * INFO (output) INTEGER + * = 0: successful exit + * < 0: if INFO = -i, the i-th argument had an illegal value +-* = 1,...,N: the QZ iteration did not converge. (A,B) is not ++* = 1,...,N: the QZ iteration did not converge. (H,T) is not + * in Schur form, but ALPHA(i) and BETA(i), + * i=INFO+1,...,N should be correct. +-* = N+1,...,2*N: the shift calculation failed. (A,B) is not ++* = N+1,...,2*N: the shift calculation failed. (H,T) is not + * in Schur form, but ALPHA(i) and BETA(i), + * i=INFO-N+1,...,N should be correct. +-* > 2*N: various "impossible" errors. 
+ * + * Further Details + * =============== +@@ -192,7 +217,7 @@ + DOUBLE PRECISION ABSB, ANORM, ASCALE, ATOL, BNORM, BSCALE, BTOL, + $ C, OPST, SAFMIN, TEMP, TEMP2, TEMPR, ULP + COMPLEX*16 ABI22, AD11, AD12, AD21, AD22, CTEMP, CTEMP2, +- $ CTEMP3, ESHIFT, RTDISC, S, SHIFT, SIGNBC, T, ++ $ CTEMP3, ESHIFT, RTDISC, S, SHIFT, SIGNBC, T1, + $ U12, X + * .. + * .. External Functions .. +@@ -279,9 +304,9 @@ + INFO = -5 + ELSE IF( IHI.GT.N .OR. IHI.LT.ILO-1 ) THEN + INFO = -6 +- ELSE IF( LDA.LT.N ) THEN ++ ELSE IF( LDH.LT.N ) THEN + INFO = -8 +- ELSE IF( LDB.LT.N ) THEN ++ ELSE IF( LDT.LT.N ) THEN + INFO = -10 + ELSE IF( LDQ.LT.1 .OR. ( ILQ .AND. LDQ.LT.N ) ) THEN + INFO = -14 +@@ -317,8 +342,8 @@ + IN = IHI + 1 - ILO + SAFMIN = DLAMCH( 'S' ) + ULP = DLAMCH( 'E' )*DLAMCH( 'B' ) +- ANORM = ZLANHS( 'F', IN, A( ILO, ILO ), LDA, RWORK ) +- BNORM = ZLANHS( 'F', IN, B( ILO, ILO ), LDB, RWORK ) ++ ANORM = ZLANHS( 'F', IN, H( ILO, ILO ), LDH, RWORK ) ++ BNORM = ZLANHS( 'F', IN, T( ILO, ILO ), LDT, RWORK ) + ATOL = MAX( SAFMIN, ULP*ANORM ) + BTOL = MAX( SAFMIN, ULP*BNORM ) + ASCALE = ONE / MAX( SAFMIN, ANORM ) +@@ -335,18 +360,18 @@ + * Set Eigenvalues IHI+1:N + * + DO 10 J = IHI + 1, N +- ABSB = ABS( B( J, J ) ) ++ ABSB = ABS( T( J, J ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = DCONJG( B( J, J ) / ABSB ) +- B( J, J ) = ABSB ++ SIGNBC = DCONJG( T( J, J ) / ABSB ) ++ T( J, J ) = ABSB + IF( ILSCHR ) THEN +- CALL ZSCAL( J-1, SIGNBC, B( 1, J ), 1 ) +- CALL ZSCAL( J, SIGNBC, A( 1, J ), 1 ) ++ CALL ZSCAL( J-1, SIGNBC, T( 1, J ), 1 ) ++ CALL ZSCAL( J, SIGNBC, H( 1, J ), 1 ) + * ----------------- Begin Timing Code --------------------- + OPST = OPST + DBLE( 12*( J-1 ) ) + * ------------------ End Timing Code ---------------------- + ELSE +- A( J, J ) = A( J, J )*SIGNBC ++ H( J, J ) = H( J, J )*SIGNBC + END IF + IF( ILZ ) + $ CALL ZSCAL( N, SIGNBC, Z( 1, J ), 1 ) +@@ -354,10 +379,10 @@ + OPST = OPST + DBLE( 6*NZ+13 ) + * -------------------- End Timing Code ----------------------- + ELSE 
+- B( J, J ) = CZERO ++ T( J, J ) = CZERO + END IF +- ALPHA( J ) = A( J, J ) +- BETA( J ) = B( J, J ) ++ ALPHA( J ) = H( J, J ) ++ BETA( J ) = T( J, J ) + 10 CONTINUE + * + * If IHI < ILO, skip QZ steps +@@ -402,22 +427,22 @@ + * Split the matrix if possible. + * + * Two tests: +-* 1: A(j,j-1)=0 or j=ILO +-* 2: B(j,j)=0 ++* 1: H(j,j-1)=0 or j=ILO ++* 2: T(j,j)=0 + * + * Special case: j=ILAST + * + IF( ILAST.EQ.ILO ) THEN + GO TO 60 + ELSE +- IF( ABS1( A( ILAST, ILAST-1 ) ).LE.ATOL ) THEN +- A( ILAST, ILAST-1 ) = CZERO ++ IF( ABS1( H( ILAST, ILAST-1 ) ).LE.ATOL ) THEN ++ H( ILAST, ILAST-1 ) = CZERO + GO TO 60 + END IF + END IF + * +- IF( ABS( B( ILAST, ILAST ) ).LE.BTOL ) THEN +- B( ILAST, ILAST ) = CZERO ++ IF( ABS( T( ILAST, ILAST ) ).LE.BTOL ) THEN ++ T( ILAST, ILAST ) = CZERO + GO TO 50 + END IF + * +@@ -425,30 +450,30 @@ + * + DO 40 J = ILAST - 1, ILO, -1 + * +-* Test 1: for A(j,j-1)=0 or j=ILO ++* Test 1: for H(j,j-1)=0 or j=ILO + * + IF( J.EQ.ILO ) THEN + ILAZRO = .TRUE. + ELSE +- IF( ABS1( A( J, J-1 ) ).LE.ATOL ) THEN +- A( J, J-1 ) = CZERO ++ IF( ABS1( H( J, J-1 ) ).LE.ATOL ) THEN ++ H( J, J-1 ) = CZERO + ILAZRO = .TRUE. + ELSE + ILAZRO = .FALSE. + END IF + END IF + * +-* Test 2: for B(j,j)=0 ++* Test 2: for T(j,j)=0 + * +- IF( ABS( B( J, J ) ).LT.BTOL ) THEN +- B( J, J ) = CZERO ++ IF( ABS( T( J, J ) ).LT.BTOL ) THEN ++ T( J, J ) = CZERO + * + * Test 1a: Check for 2 consecutive small subdiagonals in A + * + ILAZR2 = .FALSE. + IF( .NOT.ILAZRO ) THEN +- IF( ABS1( A( J, J-1 ) )*( ASCALE*ABS1( A( J+1, +- $ J ) ) ).LE.ABS1( A( J, J ) )*( ASCALE*ATOL ) ) ++ IF( ABS1( H( J, J-1 ) )*( ASCALE*ABS1( H( J+1, ++ $ J ) ) ).LE.ABS1( H( J, J ) )*( ASCALE*ATOL ) ) + $ ILAZR2 = .TRUE. + END IF + * +@@ -460,24 +485,24 @@ + * + IF( ILAZRO .OR. 
ILAZR2 ) THEN + DO 20 JCH = J, ILAST - 1 +- CTEMP = A( JCH, JCH ) +- CALL ZLARTG( CTEMP, A( JCH+1, JCH ), C, S, +- $ A( JCH, JCH ) ) +- A( JCH+1, JCH ) = CZERO +- CALL ZROT( ILASTM-JCH, A( JCH, JCH+1 ), LDA, +- $ A( JCH+1, JCH+1 ), LDA, C, S ) +- CALL ZROT( ILASTM-JCH, B( JCH, JCH+1 ), LDB, +- $ B( JCH+1, JCH+1 ), LDB, C, S ) ++ CTEMP = H( JCH, JCH ) ++ CALL ZLARTG( CTEMP, H( JCH+1, JCH ), C, S, ++ $ H( JCH, JCH ) ) ++ H( JCH+1, JCH ) = CZERO ++ CALL ZROT( ILASTM-JCH, H( JCH, JCH+1 ), LDH, ++ $ H( JCH+1, JCH+1 ), LDH, C, S ) ++ CALL ZROT( ILASTM-JCH, T( JCH, JCH+1 ), LDT, ++ $ T( JCH+1, JCH+1 ), LDT, C, S ) + IF( ILQ ) + $ CALL ZROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, DCONJG( S ) ) + IF( ILAZR2 ) +- $ A( JCH, JCH-1 ) = A( JCH, JCH-1 )*C ++ $ H( JCH, JCH-1 ) = H( JCH, JCH-1 )*C + ILAZR2 = .FALSE. + * --------------- Begin Timing Code ----------------- + OPST = OPST + DBLE( 32+40*( ILASTM-JCH )+20*NQ ) + * ---------------- End Timing Code ------------------ +- IF( ABS1( B( JCH+1, JCH+1 ) ).GE.BTOL ) THEN ++ IF( ABS1( T( JCH+1, JCH+1 ) ).GE.BTOL ) THEN + IF( JCH+1.GE.ILAST ) THEN + GO TO 60 + ELSE +@@ -485,35 +510,35 @@ + GO TO 70 + END IF + END IF +- B( JCH+1, JCH+1 ) = CZERO ++ T( JCH+1, JCH+1 ) = CZERO + 20 CONTINUE + GO TO 50 + ELSE + * +-* Only test 2 passed -- chase the zero to B(ILAST,ILAST) +-* Then process as in the case B(ILAST,ILAST)=0 ++* Only test 2 passed -- chase the zero to T(ILAST,ILAST) ++* Then process as in the case T(ILAST,ILAST)=0 + * + DO 30 JCH = J, ILAST - 1 +- CTEMP = B( JCH, JCH+1 ) +- CALL ZLARTG( CTEMP, B( JCH+1, JCH+1 ), C, S, +- $ B( JCH, JCH+1 ) ) +- B( JCH+1, JCH+1 ) = CZERO ++ CTEMP = T( JCH, JCH+1 ) ++ CALL ZLARTG( CTEMP, T( JCH+1, JCH+1 ), C, S, ++ $ T( JCH, JCH+1 ) ) ++ T( JCH+1, JCH+1 ) = CZERO + IF( JCH.LT.ILASTM-1 ) +- $ CALL ZROT( ILASTM-JCH-1, B( JCH, JCH+2 ), LDB, +- $ B( JCH+1, JCH+2 ), LDB, C, S ) +- CALL ZROT( ILASTM-JCH+2, A( JCH, JCH-1 ), LDA, +- $ A( JCH+1, JCH-1 ), LDA, C, S ) ++ $ CALL ZROT( ILASTM-JCH-1, T( 
JCH, JCH+2 ), LDT, ++ $ T( JCH+1, JCH+2 ), LDT, C, S ) ++ CALL ZROT( ILASTM-JCH+2, H( JCH, JCH-1 ), LDH, ++ $ H( JCH+1, JCH-1 ), LDH, C, S ) + IF( ILQ ) + $ CALL ZROT( N, Q( 1, JCH ), 1, Q( 1, JCH+1 ), 1, + $ C, DCONJG( S ) ) +- CTEMP = A( JCH+1, JCH ) +- CALL ZLARTG( CTEMP, A( JCH+1, JCH-1 ), C, S, +- $ A( JCH+1, JCH ) ) +- A( JCH+1, JCH-1 ) = CZERO +- CALL ZROT( JCH+1-IFRSTM, A( IFRSTM, JCH ), 1, +- $ A( IFRSTM, JCH-1 ), 1, C, S ) +- CALL ZROT( JCH-IFRSTM, B( IFRSTM, JCH ), 1, +- $ B( IFRSTM, JCH-1 ), 1, C, S ) ++ CTEMP = H( JCH+1, JCH ) ++ CALL ZLARTG( CTEMP, H( JCH+1, JCH-1 ), C, S, ++ $ H( JCH+1, JCH ) ) ++ H( JCH+1, JCH-1 ) = CZERO ++ CALL ZROT( JCH+1-IFRSTM, H( IFRSTM, JCH ), 1, ++ $ H( IFRSTM, JCH-1 ), 1, C, S ) ++ CALL ZROT( JCH-IFRSTM, T( IFRSTM, JCH ), 1, ++ $ T( IFRSTM, JCH-1 ), 1, C, S ) + IF( ILZ ) + $ CALL ZROT( N, Z( 1, JCH ), 1, Z( 1, JCH-1 ), 1, + $ C, S ) +@@ -543,40 +568,40 @@ + INFO = 2*N + 1 + GO TO 210 + * +-* B(ILAST,ILAST)=0 -- clear A(ILAST,ILAST-1) to split off a ++* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a + * 1x1 block. 
+ * + 50 CONTINUE +- CTEMP = A( ILAST, ILAST ) +- CALL ZLARTG( CTEMP, A( ILAST, ILAST-1 ), C, S, +- $ A( ILAST, ILAST ) ) +- A( ILAST, ILAST-1 ) = CZERO +- CALL ZROT( ILAST-IFRSTM, A( IFRSTM, ILAST ), 1, +- $ A( IFRSTM, ILAST-1 ), 1, C, S ) +- CALL ZROT( ILAST-IFRSTM, B( IFRSTM, ILAST ), 1, +- $ B( IFRSTM, ILAST-1 ), 1, C, S ) ++ CTEMP = H( ILAST, ILAST ) ++ CALL ZLARTG( CTEMP, H( ILAST, ILAST-1 ), C, S, ++ $ H( ILAST, ILAST ) ) ++ H( ILAST, ILAST-1 ) = CZERO ++ CALL ZROT( ILAST-IFRSTM, H( IFRSTM, ILAST ), 1, ++ $ H( IFRSTM, ILAST-1 ), 1, C, S ) ++ CALL ZROT( ILAST-IFRSTM, T( IFRSTM, ILAST ), 1, ++ $ T( IFRSTM, ILAST-1 ), 1, C, S ) + IF( ILZ ) + $ CALL ZROT( N, Z( 1, ILAST ), 1, Z( 1, ILAST-1 ), 1, C, S ) + * --------------------- Begin Timing Code ----------------------- + OPST = OPST + DBLE( 32+40*( ILAST-IFRSTM )+20*NZ ) + * ---------------------- End Timing Code ------------------------ + * +-* A(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA ++* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA + * + 60 CONTINUE +- ABSB = ABS( B( ILAST, ILAST ) ) ++ ABSB = ABS( T( ILAST, ILAST ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = DCONJG( B( ILAST, ILAST ) / ABSB ) +- B( ILAST, ILAST ) = ABSB ++ SIGNBC = DCONJG( T( ILAST, ILAST ) / ABSB ) ++ T( ILAST, ILAST ) = ABSB + IF( ILSCHR ) THEN +- CALL ZSCAL( ILAST-IFRSTM, SIGNBC, B( IFRSTM, ILAST ), 1 ) +- CALL ZSCAL( ILAST+1-IFRSTM, SIGNBC, A( IFRSTM, ILAST ), ++ CALL ZSCAL( ILAST-IFRSTM, SIGNBC, T( IFRSTM, ILAST ), 1 ) ++ CALL ZSCAL( ILAST+1-IFRSTM, SIGNBC, H( IFRSTM, ILAST ), + $ 1 ) + * ----------------- Begin Timing Code --------------------- + OPST = OPST + DBLE( 12*( ILAST-IFRSTM ) ) + * ------------------ End Timing Code ---------------------- + ELSE +- A( ILAST, ILAST ) = A( ILAST, ILAST )*SIGNBC ++ H( ILAST, ILAST ) = H( ILAST, ILAST )*SIGNBC + END IF + IF( ILZ ) + $ CALL ZSCAL( N, SIGNBC, Z( 1, ILAST ), 1 ) +@@ -584,10 +609,10 @@ + OPST = OPST + DBLE( 6*NZ+13 ) + * -------------------- End Timing Code 
----------------------- + ELSE +- B( ILAST, ILAST ) = CZERO ++ T( ILAST, ILAST ) = CZERO + END IF +- ALPHA( ILAST ) = A( ILAST, ILAST ) +- BETA( ILAST ) = B( ILAST, ILAST ) ++ ALPHA( ILAST ) = H( ILAST, ILAST ) ++ BETA( ILAST ) = T( ILAST, ILAST ) + * + * Go to next block -- exit if finished. + * +@@ -620,7 +645,7 @@ + * Compute the Shift. + * + * At this point, IFIRST < ILAST, and the diagonal elements of +-* B(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in ++* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in + * magnitude) + * + IF( ( IITER / 10 )*10.NE.IITER ) THEN +@@ -632,26 +657,26 @@ + * We factor B as U*D, where U has unit diagonals, and + * compute (A*inv(D))*inv(U). + * +- U12 = ( BSCALE*B( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- AD11 = ( ASCALE*A( ILAST-1, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD21 = ( ASCALE*A( ILAST, ILAST-1 ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) +- AD12 = ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) +- AD22 = ( ASCALE*A( ILAST, ILAST ) ) / +- $ ( BSCALE*B( ILAST, ILAST ) ) ++ U12 = ( BSCALE*T( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD11 = ( ASCALE*H( ILAST-1, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD21 = ( ASCALE*H( ILAST, ILAST-1 ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ++ AD12 = ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) ++ AD22 = ( ASCALE*H( ILAST, ILAST ) ) / ++ $ ( BSCALE*T( ILAST, ILAST ) ) + ABI22 = AD22 - U12*AD21 + * +- T = HALF*( AD11+ABI22 ) +- RTDISC = SQRT( T**2+AD12*AD21-AD11*AD22 ) +- TEMP = DBLE( T-ABI22 )*DBLE( RTDISC ) + +- $ DIMAG( T-ABI22 )*DIMAG( RTDISC ) ++ T1 = HALF*( AD11+ABI22 ) ++ RTDISC = SQRT( T1**2+AD12*AD21-AD11*AD22 ) ++ TEMP = DBLE( T1-ABI22 )*DBLE( RTDISC ) + ++ $ DIMAG( T1-ABI22 )*DIMAG( RTDISC ) + IF( TEMP.LE.ZERO ) THEN +- SHIFT = T + RTDISC ++ SHIFT = T1 + RTDISC + ELSE +- SHIFT = T - RTDISC ++ SHIFT = T1 - RTDISC + END IF + * + * ------------------- Begin Timing 
Code ---------------------- +@@ -662,8 +687,8 @@ + * + * Exceptional shift. Chosen for no particularly good reason. + * +- ESHIFT = ESHIFT + DCONJG( ( ASCALE*A( ILAST-1, ILAST ) ) / +- $ ( BSCALE*B( ILAST-1, ILAST-1 ) ) ) ++ ESHIFT = ESHIFT + DCONJG( ( ASCALE*H( ILAST-1, ILAST ) ) / ++ $ ( BSCALE*T( ILAST-1, ILAST-1 ) ) ) + SHIFT = ESHIFT + * + * ------------------- Begin Timing Code ---------------------- +@@ -676,21 +701,21 @@ + * + DO 80 J = ILAST - 1, IFIRST + 1, -1 + ISTART = J +- CTEMP = ASCALE*A( J, J ) - SHIFT*( BSCALE*B( J, J ) ) ++ CTEMP = ASCALE*H( J, J ) - SHIFT*( BSCALE*T( J, J ) ) + TEMP = ABS1( CTEMP ) +- TEMP2 = ASCALE*ABS1( A( J+1, J ) ) ++ TEMP2 = ASCALE*ABS1( H( J+1, J ) ) + TEMPR = MAX( TEMP, TEMP2 ) + IF( TEMPR.LT.ONE .AND. TEMPR.NE.ZERO ) THEN + TEMP = TEMP / TEMPR + TEMP2 = TEMP2 / TEMPR + END IF +- IF( ABS1( A( J, J-1 ) )*TEMP2.LE.TEMP*ATOL ) ++ IF( ABS1( H( J, J-1 ) )*TEMP2.LE.TEMP*ATOL ) + $ GO TO 90 + 80 CONTINUE + * + ISTART = IFIRST +- CTEMP = ASCALE*A( IFIRST, IFIRST ) - +- $ SHIFT*( BSCALE*B( IFIRST, IFIRST ) ) ++ CTEMP = ASCALE*H( IFIRST, IFIRST ) - ++ $ SHIFT*( BSCALE*T( IFIRST, IFIRST ) ) + * + * --------------------- Begin Timing Code ----------------------- + OPST = OPST - DBLE( 6 ) +@@ -702,7 +727,7 @@ + * + * Initial Q + * +- CTEMP2 = ASCALE*A( ISTART+1, ISTART ) ++ CTEMP2 = ASCALE*H( ISTART+1, ISTART ) + * + * --------------------- Begin Timing Code ----------------------- + OPST = OPST + DBLE( 2+( ILAST-ISTART )*18 ) +@@ -714,18 +739,18 @@ + * + DO 150 J = ISTART, ILAST - 1 + IF( J.GT.ISTART ) THEN +- CTEMP = A( J, J-1 ) +- CALL ZLARTG( CTEMP, A( J+1, J-1 ), C, S, A( J, J-1 ) ) +- A( J+1, J-1 ) = CZERO ++ CTEMP = H( J, J-1 ) ++ CALL ZLARTG( CTEMP, H( J+1, J-1 ), C, S, H( J, J-1 ) ) ++ H( J+1, J-1 ) = CZERO + END IF + * + DO 100 JC = J, ILASTM +- CTEMP = C*A( J, JC ) + S*A( J+1, JC ) +- A( J+1, JC ) = -DCONJG( S )*A( J, JC ) + C*A( J+1, JC ) +- A( J, JC ) = CTEMP +- CTEMP2 = C*B( J, JC ) + S*B( J+1, JC ) +- B( J+1, JC ) = 
-DCONJG( S )*B( J, JC ) + C*B( J+1, JC ) +- B( J, JC ) = CTEMP2 ++ CTEMP = C*H( J, JC ) + S*H( J+1, JC ) ++ H( J+1, JC ) = -DCONJG( S )*H( J, JC ) + C*H( J+1, JC ) ++ H( J, JC ) = CTEMP ++ CTEMP2 = C*T( J, JC ) + S*T( J+1, JC ) ++ T( J+1, JC ) = -DCONJG( S )*T( J, JC ) + C*T( J+1, JC ) ++ T( J, JC ) = CTEMP2 + 100 CONTINUE + IF( ILQ ) THEN + DO 110 JR = 1, N +@@ -735,19 +760,19 @@ + 110 CONTINUE + END IF + * +- CTEMP = B( J+1, J+1 ) +- CALL ZLARTG( CTEMP, B( J+1, J ), C, S, B( J+1, J+1 ) ) +- B( J+1, J ) = CZERO ++ CTEMP = T( J+1, J+1 ) ++ CALL ZLARTG( CTEMP, T( J+1, J ), C, S, T( J+1, J+1 ) ) ++ T( J+1, J ) = CZERO + * + DO 120 JR = IFRSTM, MIN( J+2, ILAST ) +- CTEMP = C*A( JR, J+1 ) + S*A( JR, J ) +- A( JR, J ) = -DCONJG( S )*A( JR, J+1 ) + C*A( JR, J ) +- A( JR, J+1 ) = CTEMP ++ CTEMP = C*H( JR, J+1 ) + S*H( JR, J ) ++ H( JR, J ) = -DCONJG( S )*H( JR, J+1 ) + C*H( JR, J ) ++ H( JR, J+1 ) = CTEMP + 120 CONTINUE + DO 130 JR = IFRSTM, J +- CTEMP = C*B( JR, J+1 ) + S*B( JR, J ) +- B( JR, J ) = -DCONJG( S )*B( JR, J+1 ) + C*B( JR, J ) +- B( JR, J+1 ) = CTEMP ++ CTEMP = C*T( JR, J+1 ) + S*T( JR, J ) ++ T( JR, J ) = -DCONJG( S )*T( JR, J+1 ) + C*T( JR, J ) ++ T( JR, J+1 ) = CTEMP + 130 CONTINUE + IF( ILZ ) THEN + DO 140 JR = 1, N +@@ -793,18 +818,18 @@ + * Set Eigenvalues 1:ILO-1 + * + DO 200 J = 1, ILO - 1 +- ABSB = ABS( B( J, J ) ) ++ ABSB = ABS( T( J, J ) ) + IF( ABSB.GT.SAFMIN ) THEN +- SIGNBC = DCONJG( B( J, J ) / ABSB ) +- B( J, J ) = ABSB ++ SIGNBC = DCONJG( T( J, J ) / ABSB ) ++ T( J, J ) = ABSB + IF( ILSCHR ) THEN +- CALL ZSCAL( J-1, SIGNBC, B( 1, J ), 1 ) +- CALL ZSCAL( J, SIGNBC, A( 1, J ), 1 ) ++ CALL ZSCAL( J-1, SIGNBC, T( 1, J ), 1 ) ++ CALL ZSCAL( J, SIGNBC, H( 1, J ), 1 ) + * ----------------- Begin Timing Code --------------------- + OPST = OPST + DBLE( 12*( J-1 ) ) + * ------------------ End Timing Code ---------------------- + ELSE +- A( J, J ) = A( J, J )*SIGNBC ++ H( J, J ) = H( J, J )*SIGNBC + END IF + IF( ILZ ) + $ CALL ZSCAL( N, SIGNBC, Z( 1, J 
), 1 ) +@@ -812,10 +837,10 @@ + OPST = OPST + DBLE( 6*NZ+13 ) + * -------------------- End Timing Code ----------------------- + ELSE +- B( J, J ) = CZERO ++ T( J, J ) = CZERO + END IF +- ALPHA( J ) = A( J, J ) +- BETA( J ) = B( J, J ) ++ ALPHA( J ) = H( J, J ) ++ BETA( J ) = T( J, J ) + 200 CONTINUE + * + * Normal Termination +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/ztgevc.f LAPACK/TIMING/EIG/EIGSRC/ztgevc.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/ztgevc.f Thu Nov 4 14:28:33 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/ztgevc.f Fri May 25 16:20:52 2001 +@@ -1,19 +1,19 @@ +- SUBROUTINE ZTGEVC( SIDE, HOWMNY, SELECT, N, A, LDA, B, LDB, VL, ++ SUBROUTINE ZTGEVC( SIDE, HOWMNY, SELECT, N, S, LDS, P, LDP, VL, + $ LDVL, VR, LDVR, MM, M, WORK, RWORK, INFO ) + * + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 4, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +- INTEGER INFO, LDA, LDB, LDVL, LDVR, M, MM, N ++ INTEGER INFO, LDP, LDS, LDVL, LDVR, M, MM, N + * .. + * .. Array Arguments .. + LOGICAL SELECT( * ) + DOUBLE PRECISION RWORK( * ) +- COMPLEX*16 A( LDA, * ), B( LDB, * ), VL( LDVL, * ), ++ COMPLEX*16 P( LDP, * ), S( LDS, * ), VL( LDVL, * ), + $ VR( LDVR, * ), WORK( * ) + * .. + * +@@ -34,28 +34,30 @@ + * Purpose + * ======= + * +-* ZTGEVC computes some or all of the right and/or left generalized +-* eigenvectors of a pair of complex upper triangular matrices (A,B). +-* +-* The right generalized eigenvector x and the left generalized +-* eigenvector y of (A,B) corresponding to a generalized eigenvalue +-* w are defined by: +-* +-* (A - wB) * x = 0 and y**H * (A - wB) = 0 +-* ++* ZTGEVC computes some or all of the right and/or left eigenvectors of ++* a pair of complex matrices (S,P), where S and P are upper triangular. 
++* Matrix pairs of this type are produced by the generalized Schur ++* factorization of a complex matrix pair (A,B): ++* ++* A = Q*S*Z**H, B = Q*P*Z**H ++* ++* as computed by ZGGHRD + ZHGEQZ. ++* ++* The right eigenvector x and the left eigenvector y of (S,P) ++* corresponding to an eigenvalue w are defined by: ++* ++* S*x = w*P*x, (y**H)*S = w*(y**H)*P, ++* + * where y**H denotes the conjugate tranpose of y. +-* +-* If an eigenvalue w is determined by zero diagonal elements of both A +-* and B, a unit vector is returned as the corresponding eigenvector. +-* +-* If all eigenvectors are requested, the routine may either return +-* the matrices X and/or Y of right or left eigenvectors of (A,B), or +-* the products Z*X and/or Q*Y, where Z and Q are input unitary +-* matrices. If (A,B) was obtained from the generalized Schur +-* factorization of an original pair of matrices +-* (A0,B0) = (Q*A*Z**H,Q*B*Z**H), +-* then Z*X and Q*Y are the matrices of right or left eigenvectors of +-* A. ++* The eigenvalues are not input to this routine, but are computed ++* directly from the diagonal elements of S and P. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of (S,P), or the products Z*X and/or Q*Y, ++* where Z and Q are input matrices. ++* If Q and Z are the unitary factors from the generalized Schur ++* factorization of a matrix pair (A,B), then Z*X and Q*Y ++* are the matrices of right and left eigenvectors of (A,B). + * + * Arguments + * ========= +@@ -67,70 +69,69 @@ + * + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; +-* = 'B': compute all right and/or left eigenvectors, and +-* backtransform them using the input matrices supplied +-* in VR and/or VL; ++* = 'B': compute all right and/or left eigenvectors, ++* backtransformed by the matrices in VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, + * specified by the logical array SELECT. 
+ * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY='S', SELECT specifies the eigenvectors to be +-* computed. +-* If HOWMNY='A' or 'B', SELECT is not referenced. +-* To select the eigenvector corresponding to the j-th +-* eigenvalue, SELECT(j) must be set to .TRUE.. ++* computed. The eigenvector corresponding to the j-th ++* eigenvalue is computed if SELECT(j) = .TRUE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER +-* The order of the matrices A and B. N >= 0. +-* +-* A (input) COMPLEX*16 array, dimension (LDA,N) +-* The upper triangular matrix A. +-* +-* LDA (input) INTEGER +-* The leading dimension of array A. LDA >= max(1,N). ++* The order of the matrices S and P. N >= 0. + * +-* B (input) COMPLEX*16 array, dimension (LDB,N) +-* The upper triangular matrix B. B must have real diagonal +-* elements. ++* S (input) COMPLEX*16 array, dimension (LDS,N) ++* The upper triangular matrix S from a generalized Schur ++* factorization, as computed by ZHGEQZ. ++* ++* LDS (input) INTEGER ++* The leading dimension of array S. LDS >= max(1,N). ++* ++* P (input) COMPLEX*16 array, dimension (LDP,N) ++* The upper triangular matrix P from a generalized Schur ++* factorization, as computed by ZHGEQZ. P must have real ++* diagonal elements. + * +-* LDB (input) INTEGER +-* The leading dimension of array B. LDB >= max(1,N). ++* LDP (input) INTEGER ++* The leading dimension of array P. LDP >= max(1,N). + * + * VL (input/output) COMPLEX*16 array, dimension (LDVL,MM) + * On entry, if SIDE = 'L' or 'B' and HOWMNY = 'B', VL must + * contain an N-by-N matrix Q (usually the unitary matrix Q + * of left Schur vectors returned by ZHGEQZ). 
+ * On exit, if SIDE = 'L' or 'B', VL contains: +-* if HOWMNY = 'A', the matrix Y of left eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix Y of left eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Q*Y; +-* if HOWMNY = 'S', the left eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the left eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VL, in the same order as their eigenvalues. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of array VL. +-* LDVL >= max(1,N) if SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'l' or 'B' or 'b', LDVL >= N. + * + * VR (input/output) COMPLEX*16 array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must + * contain an N-by-N matrix Q (usually the unitary matrix Z + * of right Schur vectors returned by ZHGEQZ). + * On exit, if SIDE = 'R' or 'B', VR contains: +-* if HOWMNY = 'A', the matrix X of right eigenvectors of (A,B); ++* if HOWMNY = 'A', the matrix X of right eigenvectors of (S,P); + * if HOWMNY = 'B', the matrix Z*X; +-* if HOWMNY = 'S', the right eigenvectors of (A,B) specified by ++* if HOWMNY = 'S', the right eigenvectors of (S,P) specified by + * SELECT, stored consecutively in the columns of + * VR, in the same order as their eigenvalues. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. + * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. +-* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B', LDVR >= N. + * + * MM (input) INTEGER +-* The leading dimension of the array VR. +-* LDVR >= max(1,N) if SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The number of columns in the arrays VL and/or VR. MM >= M. 
+ * + * M (output) INTEGER + * The number of columns in the arrays VL and/or VR actually +@@ -194,7 +195,7 @@ + IHWMNY = 2 + ILALL = .FALSE. + ILBACK = .FALSE. +- ELSE IF( LSAME( HOWMNY, 'B' ) .OR. LSAME( HOWMNY, 'T' ) ) THEN ++ ELSE IF( LSAME( HOWMNY, 'B' ) ) THEN + IHWMNY = 3 + ILALL = .TRUE. + ILBACK = .TRUE. +@@ -225,9 +226,9 @@ + INFO = -2 + ELSE IF( N.LT.0 ) THEN + INFO = -4 +- ELSE IF( LDA.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDS.LT.MAX( 1, N ) ) THEN + INFO = -6 +- ELSE IF( LDB.LT.MAX( 1, N ) ) THEN ++ ELSE IF( LDP.LT.MAX( 1, N ) ) THEN + INFO = -8 + END IF + IF( INFO.NE.0 ) THEN +@@ -251,7 +252,7 @@ + * + ILBBAD = .FALSE. + DO 20 J = 1, N +- IF( DIMAG( B( J, J ) ).NE.ZERO ) ++ IF( DIMAG( P( J, J ) ).NE.ZERO ) + $ ILBBAD = .TRUE. + 20 CONTINUE + * +@@ -289,19 +290,19 @@ + * part of A and B to check for possible overflow in the triangular + * solver. + * +- ANORM = ABS1( A( 1, 1 ) ) +- BNORM = ABS1( B( 1, 1 ) ) ++ ANORM = ABS1( S( 1, 1 ) ) ++ BNORM = ABS1( P( 1, 1 ) ) + RWORK( 1 ) = ZERO + RWORK( N+1 ) = ZERO + DO 40 J = 2, N + RWORK( J ) = ZERO + RWORK( N+J ) = ZERO + DO 30 I = 1, J - 1 +- RWORK( J ) = RWORK( J ) + ABS1( A( I, J ) ) +- RWORK( N+J ) = RWORK( N+J ) + ABS1( B( I, J ) ) ++ RWORK( J ) = RWORK( J ) + ABS1( S( I, J ) ) ++ RWORK( N+J ) = RWORK( N+J ) + ABS1( P( I, J ) ) + 30 CONTINUE +- ANORM = MAX( ANORM, RWORK( J )+ABS1( A( J, J ) ) ) +- BNORM = MAX( BNORM, RWORK( N+J )+ABS1( B( J, J ) ) ) ++ ANORM = MAX( ANORM, RWORK( J )+ABS1( S( J, J ) ) ) ++ BNORM = MAX( BNORM, RWORK( N+J )+ABS1( P( J, J ) ) ) + 40 CONTINUE + * + ASCALE = ONE / MAX( ANORM, SAFMIN ) +@@ -326,8 +327,8 @@ + IF( ILCOMP ) THEN + IEIG = IEIG + 1 + * +- IF( ABS1( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( DBLE( B( JE, JE ) ) ).LE.SAFMIN ) THEN ++ IF( ABS1( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( DBLE( P( JE, JE ) ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- return unit eigenvector + * +@@ -343,10 +344,10 @@ + * H + * y ( a A - b B ) = 0 + * +- TEMP = ONE / MAX( ABS1( A( JE, JE ) )*ASCALE, +- $ ABS( DBLE( B( JE, JE ) ) )*BSCALE, SAFMIN ) +- SALPHA = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*DBLE( B( JE, JE ) ) )*BSCALE ++ TEMP = ONE / MAX( ABS1( S( JE, JE ) )*ASCALE, ++ $ ABS( DBLE( P( JE, JE ) ) )*BSCALE, SAFMIN ) ++ SALPHA = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*DBLE( P( JE, JE ) ) )*BSCALE + ACOEFF = SBETA*ASCALE + BCOEFF = SALPHA*BSCALE + * +@@ -403,7 +404,7 @@ + * + * Compute + * j-1 +-* SUM = sum conjg( a*A(k,j) - b*B(k,j) )*x(k) ++* SUM = sum conjg( a*S(k,j) - b*P(k,j) )*x(k) + * k=je + * (Scale if necessary) + * +@@ -422,16 +423,16 @@ + SUMB = CZERO + * + DO 80 JR = JE, J - 1 +- SUMA = SUMA + DCONJG( A( JR, J ) )*WORK( JR ) +- SUMB = SUMB + DCONJG( B( JR, J ) )*WORK( JR ) ++ SUMA = SUMA + DCONJG( S( JR, J ) )*WORK( JR ) ++ SUMB = SUMB + DCONJG( P( JR, J ) )*WORK( JR ) + 80 CONTINUE + SUM = ACOEFF*SUMA - DCONJG( BCOEFF )*SUMB + * +-* Form x(j) = - SUM / conjg( a*A(j,j) - b*B(j,j) ) ++* Form x(j) = - SUM / conjg( a*S(j,j) - b*P(j,j) ) + * + * with scaling and perturbation of the denominator + * +- D = DCONJG( ACOEFF*A( J, J )-BCOEFF*B( J, J ) ) ++ D = DCONJG( ACOEFF*S( J, J )-BCOEFF*P( J, J ) ) + IF( ABS1( D ).LE.DMIN ) + $ D = DCMPLX( DMIN ) + * +@@ -511,8 +512,8 @@ + IF( ILCOMP ) THEN + IEIG = IEIG - 1 + * +- IF( ABS1( A( JE, JE ) ).LE.SAFMIN .AND. +- $ ABS( DBLE( B( JE, JE ) ) ).LE.SAFMIN ) THEN ++ IF( ABS1( S( JE, JE ) ).LE.SAFMIN .AND. 
++ $ ABS( DBLE( P( JE, JE ) ) ).LE.SAFMIN ) THEN + * + * Singular matrix pencil -- return unit eigenvector + * +@@ -528,10 +529,10 @@ + * + * ( a A - b B ) x = 0 + * +- TEMP = ONE / MAX( ABS1( A( JE, JE ) )*ASCALE, +- $ ABS( DBLE( B( JE, JE ) ) )*BSCALE, SAFMIN ) +- SALPHA = ( TEMP*A( JE, JE ) )*ASCALE +- SBETA = ( TEMP*DBLE( B( JE, JE ) ) )*BSCALE ++ TEMP = ONE / MAX( ABS1( S( JE, JE ) )*ASCALE, ++ $ ABS( DBLE( P( JE, JE ) ) )*BSCALE, SAFMIN ) ++ SALPHA = ( TEMP*S( JE, JE ) )*ASCALE ++ SBETA = ( TEMP*DBLE( P( JE, JE ) ) )*BSCALE + ACOEFF = SBETA*ASCALE + BCOEFF = SALPHA*BSCALE + * +@@ -584,7 +585,7 @@ + * WORK(j+1:JE) contains x + * + DO 170 JR = 1, JE - 1 +- WORK( JR ) = ACOEFF*A( JR, JE ) - BCOEFF*B( JR, JE ) ++ WORK( JR ) = ACOEFF*S( JR, JE ) - BCOEFF*P( JR, JE ) + 170 CONTINUE + WORK( JE ) = CONE + * +@@ -593,7 +594,7 @@ + * Form x(j) := - w(j) / d + * with scaling and perturbation of the denominator + * +- D = ACOEFF*A( J, J ) - BCOEFF*B( J, J ) ++ D = ACOEFF*S( J, J ) - BCOEFF*P( J, J ) + IF( ABS1( D ).LE.DMIN ) + $ D = DCMPLX( DMIN ) + * +@@ -615,7 +616,7 @@ + * + IF( J.GT.1 ) THEN + * +-* w = w + x(j)*(a A(*,j) - b B(*,j) ) with scaling ++* w = w + x(j)*(a S(*,j) - b P(*,j) ) with scaling + * + IF( ABS1( WORK( J ) ).GT.ONE ) THEN + TEMP = ONE / ABS1( WORK( J ) ) +@@ -635,8 +636,8 @@ + CA = ACOEFF*WORK( J ) + CB = BCOEFF*WORK( J ) + DO 200 JR = 1, J - 1 +- WORK( JR ) = WORK( JR ) + CA*A( JR, J ) - +- $ CB*B( JR, J ) ++ WORK( JR ) = WORK( JR ) + CA*S( JR, J ) - ++ $ CB*P( JR, J ) + 200 CONTINUE + END IF + 210 CONTINUE +diff -uNr LAPACK.orig/TIMING/EIG/EIGSRC/ztrevc.f LAPACK/TIMING/EIG/EIGSRC/ztrevc.f +--- LAPACK.orig/TIMING/EIG/EIGSRC/ztrevc.f Thu Nov 4 14:28:34 1999 ++++ LAPACK/TIMING/EIG/EIGSRC/ztrevc.f Fri May 25 16:21:10 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK routine (instrumented to count operations, version 3.0) -- + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* June 30, 1999 ++* May 7, 2001 + * + * .. Scalar Arguments .. + CHARACTER HOWMNY, SIDE +@@ -31,20 +31,23 @@ + * + * ZTREVC computes some or all of the right and/or left eigenvectors of + * a complex upper triangular matrix T. +-* ++* Matrices of this type are produced by the Schur factorization of ++* a complex general matrix: A = Q*T*Q**H, as computed by ZHSEQR. ++* + * The right eigenvector x and the left eigenvector y of T corresponding + * to an eigenvalue w are defined by: +-* +-* T*x = w*x, y'*T = w*y' +-* +-* where y' denotes the conjugate transpose of the vector y. +-* +-* If all eigenvectors are requested, the routine may either return the +-* matrices X and/or Y of right or left eigenvectors of T, or the +-* products Q*X and/or Q*Y, where Q is an input unitary +-* matrix. If T was obtained from the Schur factorization of an +-* original matrix A = Q*T*Q', then Q*X and Q*Y are the matrices of +-* right or left eigenvectors of A. ++* ++* T*x = w*x, (y**H)*T = w*(y**H) ++* ++* where y**H denotes the conjugate transpose of the vector y. ++* The eigenvalues are not input to this routine, but are read directly ++* from the diagonal of T. ++* ++* This routine returns the matrices X and/or Y of right and left ++* eigenvectors of T, or the products Q*X and/or Q*Y, where Q is an ++* input matrix. If Q is the unitary factor that reduces a matrix A to ++* Schur form T, then Q*X and Q*Y are the matrices of right and left ++* eigenvectors of A. 
+ * + * Arguments + * ========= +@@ -57,17 +60,17 @@ + * HOWMNY (input) CHARACTER*1 + * = 'A': compute all right and/or left eigenvectors; + * = 'B': compute all right and/or left eigenvectors, +-* and backtransform them using the input matrices +-* supplied in VR and/or VL; ++* backtransformed using the matrices supplied in ++* VR and/or VL; + * = 'S': compute selected right and/or left eigenvectors, +-* specified by the logical array SELECT. ++* as indicated by the logical array SELECT. + * + * SELECT (input) LOGICAL array, dimension (N) + * If HOWMNY = 'S', SELECT specifies the eigenvectors to be + * computed. +-* If HOWMNY = 'A' or 'B', SELECT is not referenced. +-* To select the eigenvector corresponding to the j-th +-* eigenvalue, SELECT(j) must be set to .TRUE.. ++* The eigenvector corresponding to the j-th eigenvalue is ++* computed if SELECT(j) = .TRUE.. ++* Not referenced if HOWMNY = 'A' or 'B'. + * + * N (input) INTEGER + * The order of the matrix T. N >= 0. +@@ -85,19 +88,16 @@ + * Schur vectors returned by ZHSEQR). + * On exit, if SIDE = 'L' or 'B', VL contains: + * if HOWMNY = 'A', the matrix Y of left eigenvectors of T; +-* VL is lower triangular. The i-th column +-* VL(i) of VL is the eigenvector corresponding +-* to T(i,i). + * if HOWMNY = 'B', the matrix Q*Y; + * if HOWMNY = 'S', the left eigenvectors of T specified by + * SELECT, stored consecutively in the columns + * of VL, in the same order as their + * eigenvalues. +-* If SIDE = 'R', VL is not referenced. ++* Not referenced if SIDE = 'R'. + * + * LDVL (input) INTEGER +-* The leading dimension of the array VL. LDVL >= max(1,N) if +-* SIDE = 'L' or 'B'; LDVL >= 1 otherwise. ++* The leading dimension of the array VL. LDVL >= 1, and if ++* SIDE = 'L' or 'B', LDVL >= N. + * + * VR (input/output) COMPLEX*16 array, dimension (LDVR,MM) + * On entry, if SIDE = 'R' or 'B' and HOWMNY = 'B', VR must +@@ -105,19 +105,16 @@ + * Schur vectors returned by ZHSEQR). 
+ * On exit, if SIDE = 'R' or 'B', VR contains: + * if HOWMNY = 'A', the matrix X of right eigenvectors of T; +-* VR is upper triangular. The i-th column +-* VR(i) of VR is the eigenvector corresponding +-* to T(i,i). + * if HOWMNY = 'B', the matrix Q*X; + * if HOWMNY = 'S', the right eigenvectors of T specified by + * SELECT, stored consecutively in the columns + * of VR, in the same order as their + * eigenvalues. +-* If SIDE = 'L', VR is not referenced. ++* Not referenced if SIDE = 'L'. + * + * LDVR (input) INTEGER +-* The leading dimension of the array VR. LDVR >= max(1,N) if +-* SIDE = 'R' or 'B'; LDVR >= 1 otherwise. ++* The leading dimension of the array VR. LDVR >= 1, and if ++* SIDE = 'R' or 'B'; LDVR >= N. + * + * MM (input) INTEGER + * The number of columns in the arrays VL and/or VR. MM >= M. +diff -uNr LAPACK.orig/TIMING/LIN/LINSRC/cgelss.f LAPACK/TIMING/LIN/LINSRC/cgelss.f +--- LAPACK.orig/TIMING/LIN/LINSRC/cgelss.f Thu Nov 4 14:28:16 1999 ++++ LAPACK/TIMING/LIN/LINSRC/cgelss.f Fri May 25 16:21:43 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (instrumented to count ops, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -98,10 +98,9 @@ + * LWORK >= 2*min(M,N) + max(M,N,NRHS) + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. 
+ * + * RWORK (workspace) REAL array, dimension (5*min(M,N)) + * +@@ -187,7 +186,7 @@ + * immediately following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 0 + MM = M + IF( M.GE.N .AND. M.GE.MNTHR ) THEN +@@ -255,13 +254,12 @@ + MAXWRK = MAX( MAXWRK, N*NRHS ) + END IF + END IF +- MINWRK = MAX( MINWRK, 1 ) + MAXWRK = MAX( MINWRK, MAXWRK ) + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) ++ $ INFO = -12 + END IF + * +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) +- $ INFO = -12 + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'CGELSS', -INFO ) + RETURN +@@ -632,10 +630,10 @@ + $ SOPBL3( 'CGEMM ', M, BL, M ) + T1 = SECOND( ) + CALL CGEMM( 'C', 'N', M, BL, M, CONE, WORK( IL ), LDWORK, +- $ B( 1, I ), LDB, CZERO, WORK( IWORK ), N ) ++ $ B( 1, I ), LDB, CZERO, WORK( IWORK ), M ) + T2 = SECOND( ) + TIMNG( GEMM ) = TIMNG( GEMM ) + ( T2-T1 ) +- CALL CLACPY( 'G', M, BL, WORK( IWORK ), N, B( 1, I ), ++ CALL CLACPY( 'G', M, BL, WORK( IWORK ), M, B( 1, I ), + $ LDB ) + 40 CONTINUE + ELSE +diff -uNr LAPACK.orig/TIMING/LIN/LINSRC/dgelss.f LAPACK/TIMING/LIN/LINSRC/dgelss.f +--- LAPACK.orig/TIMING/LIN/LINSRC/dgelss.f Thu Nov 4 14:28:17 1999 ++++ LAPACK/TIMING/LIN/LINSRC/dgelss.f Fri May 25 16:21:40 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (instrumented to count ops, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -97,10 +97,9 @@ + * LWORK >= 3*min(M,N) + max( 2*min(M,N), max(M,N), NRHS ) + * For good performance, LWORK should generally be larger. 
+ * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * INFO (output) INTEGER + * = 0: successful exit +@@ -178,7 +177,7 @@ + * following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 0 + MM = M + IF( M.GE.N .AND. M.GE.MNTHR ) THEN +@@ -251,11 +250,10 @@ + END IF + MAXWRK = MAX( MINWRK, MAXWRK ) + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) ++ $ INFO = -12 + END IF + * +- MINWRK = MAX( MINWRK, 1 ) +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) +- $ INFO = -12 + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'DGELSS', -INFO ) + RETURN +@@ -613,10 +611,10 @@ + $ DOPBL3( 'DGEMM ', M, BL, M ) + T1 = DSECND( ) + CALL DGEMM( 'T', 'N', M, BL, M, ONE, WORK( IL ), LDWORK, +- $ B( 1, I ), LDB, ZERO, WORK( IWORK ), N ) ++ $ B( 1, I ), LDB, ZERO, WORK( IWORK ), M ) + T2 = DSECND( ) + TIMNG( GEMM ) = TIMNG( GEMM ) + ( T2-T1 ) +- CALL DLACPY( 'G', M, BL, WORK( IWORK ), N, B( 1, I ), ++ CALL DLACPY( 'G', M, BL, WORK( IWORK ), M, B( 1, I ), + $ LDB ) + 40 CONTINUE + ELSE +diff -uNr LAPACK.orig/TIMING/LIN/LINSRC/sgelss.f LAPACK/TIMING/LIN/LINSRC/sgelss.f +--- LAPACK.orig/TIMING/LIN/LINSRC/sgelss.f Thu Nov 4 14:28:18 1999 ++++ LAPACK/TIMING/LIN/LINSRC/sgelss.f Fri May 25 16:21:36 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (instrumented to count ops, version 3.0) -- + * Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. 
+ INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -97,10 +97,9 @@ + * LWORK >= 3*min(M,N) + max( 2*min(M,N), max(M,N), NRHS ) + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * INFO (output) INTEGER + * = 0: successful exit +@@ -178,7 +177,7 @@ + * following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 0 + MM = M + IF( M.GE.N .AND. M.GE.MNTHR ) THEN +@@ -251,11 +250,10 @@ + END IF + MAXWRK = MAX( MINWRK, MAXWRK ) + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) ++ $ INFO = -12 + END IF + * +- MINWRK = MAX( MINWRK, 1 ) +- IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) +- $ INFO = -12 + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'SGELSS', -INFO ) + RETURN +@@ -613,10 +611,10 @@ + $ SOPBL3( 'SGEMM ', M, BL, M ) + T1 = SECOND( ) + CALL SGEMM( 'T', 'N', M, BL, M, ONE, WORK( IL ), LDWORK, +- $ B( 1, I ), LDB, ZERO, WORK( IWORK ), N ) ++ $ B( 1, I ), LDB, ZERO, WORK( IWORK ), M ) + T2 = SECOND( ) + TIMNG( GEMM ) = TIMNG( GEMM ) + ( T2-T1 ) +- CALL SLACPY( 'G', M, BL, WORK( IWORK ), N, B( 1, I ), ++ CALL SLACPY( 'G', M, BL, WORK( IWORK ), M, B( 1, I ), + $ LDB ) + 40 CONTINUE + ELSE +diff -uNr LAPACK.orig/TIMING/LIN/LINSRC/zgelss.f LAPACK/TIMING/LIN/LINSRC/zgelss.f +--- LAPACK.orig/TIMING/LIN/LINSRC/zgelss.f Thu Nov 4 14:28:18 1999 ++++ LAPACK/TIMING/LIN/LINSRC/zgelss.f Fri May 25 16:21:47 2001 +@@ -4,7 +4,7 @@ + * -- LAPACK driver routine (instrumented to count ops, version 3.0) -- + * Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + * Courant Institute, Argonne National Lab, and Rice University +-* October 31, 1999 ++* April 25, 2001 + * + * .. Scalar Arguments .. + INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK +@@ -98,10 +98,9 @@ + * LWORK >= 2*min(M,N) + max(M,N,NRHS) + * For good performance, LWORK should generally be larger. + * +-* If LWORK = -1, then a workspace query is assumed; the routine +-* only calculates the optimal size of the WORK array, returns +-* this value as the first entry of the WORK array, and no error +-* message related to LWORK is issued by XERBLA. ++* If LWORK = -1, a workspace query is assumed. The optimal ++* size for the WORK array is calculated and stored in WORK(1), ++* and no other work except argument checking is performed. + * + * RWORK (workspace) DOUBLE PRECISION array, dimension (5*min(M,N)) + * +@@ -186,7 +185,7 @@ + * immediately following subroutine, as returned by ILAENV.) + * + MINWRK = 1 +- IF( INFO.EQ.0 .AND. ( LWORK.GE.1 .OR. LQUERY ) ) THEN ++ IF( INFO.EQ.0 ) THEN + MAXWRK = 0 + MM = M + IF( M.GE.N .AND. M.GE.MNTHR ) THEN +@@ -254,13 +253,12 @@ + MAXWRK = MAX( MAXWRK, N*NRHS ) + END IF + END IF +- MINWRK = MAX( MINWRK, 1 ) + MAXWRK = MAX( MINWRK, MAXWRK ) + WORK( 1 ) = MAXWRK ++ IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) ++ $ INFO = -12 + END IF + * +- IF( LWORK.LT.MINWRK .AND. 
.NOT.LQUERY ) +- $ INFO = -12 + IF( INFO.NE.0 ) THEN + CALL XERBLA( 'ZGELSS', -INFO ) + RETURN +@@ -631,10 +629,10 @@ + $ DOPBL3( 'ZGEMM ', M, BL, M ) + T1 = DSECND( ) + CALL ZGEMM( 'C', 'N', M, BL, M, CONE, WORK( IL ), LDWORK, +- $ B( 1, I ), LDB, CZERO, WORK( IWORK ), N ) ++ $ B( 1, I ), LDB, CZERO, WORK( IWORK ), M ) + T2 = DSECND( ) + TIMNG( GEMM ) = TIMNG( GEMM ) + ( T2-T1 ) +- CALL ZLACPY( 'G', M, BL, WORK( IWORK ), N, B( 1, I ), ++ CALL ZLACPY( 'G', M, BL, WORK( IWORK ), M, B( 1, I ), + $ LDB ) + 40 CONTINUE + ELSE +diff -uNr LAPACK.orig/TIMING/Makefile LAPACK/TIMING/Makefile +--- LAPACK.orig/TIMING/Makefile Thu Nov 4 14:27:54 1999 ++++ LAPACK/TIMING/Makefile Fri May 25 16:17:35 2001 +@@ -141,242 +141,242 @@ + + stime.out: stime.in xlintims + @echo Timing square REAL LAPACK linear equation routines +- xlintims < stime.in > $@ 2>&1 ++ ./xlintims < stime.in > $@ 2>&1 + + STIME.out: STIME.in xlintims + @echo Timing square REAL LAPACK linear equation routines +- xlintims < STIME.in > $@ 2>&1 ++ ./xlintims < STIME.in > $@ 2>&1 + + sband.out: sband.in xlintims + @echo Timing banded REAL LAPACK linear equation routines +- xlintims < sband.in > $@ 2>&1 ++ ./xlintims < sband.in > $@ 2>&1 + + SBAND.out: SBAND.in xlintims + @echo Timing banded REAL LAPACK linear equation routines +- xlintims < SBAND.in > $@ 2>&1 ++ ./xlintims < SBAND.in > $@ 2>&1 + + stime2.out: stime2.in xlintims + @echo Timing rectangular REAL LAPACK linear equation routines +- xlintims < stime2.in > $@ 2>&1 ++ ./xlintims < stime2.in > $@ 2>&1 + + STIME2.out: STIME2.in xlintims + @echo Timing rectangular REAL LAPACK linear equation routines +- xlintims < STIME2.in > $@ 2>&1 ++ ./xlintims < STIME2.in > $@ 2>&1 + # + # ======== COMPLEX LIN TIMINGS ========================== + + ctime.out: ctime.in xlintimc + @echo Timing square COMPLEX LAPACK linear equation routines +- xlintimc < ctime.in > $@ 2>&1 ++ ./xlintimc < ctime.in > $@ 2>&1 + + CTIME.out: CTIME.in xlintimc + @echo Timing square COMPLEX LAPACK 
linear equation routines +- xlintimc < CTIME.in > $@ 2>&1 ++ ./xlintimc < CTIME.in > $@ 2>&1 + + cband.out: cband.in xlintimc + @echo Timing banded COMPLEX LAPACK linear equation routines +- xlintimc < cband.in > $@ 2>&1 ++ ./xlintimc < cband.in > $@ 2>&1 + + CBAND.out: CBAND.in xlintimc + @echo Timing banded COMPLEX LAPACK linear equation routines +- xlintimc < CBAND.in > $@ 2>&1 ++ ./xlintimc < CBAND.in > $@ 2>&1 + + ctime2.out: ctime2.in xlintimc + @echo Timing rectangular COMPLEX LAPACK linear equation routines +- xlintimc < ctime2.in > $@ 2>&1 ++ ./xlintimc < ctime2.in > $@ 2>&1 + + CTIME2.out: CTIME2.in xlintimc + @echo Timing rectangular COMPLEX LAPACK linear equation routines +- xlintimc < CTIME2.in > $@ 2>&1 ++ ./xlintimc < CTIME2.in > $@ 2>&1 + # + # ======== DOUBLE LIN TIMINGS =========================== + + dtime.out: dtime.in xlintimd + @echo Timing square DOUBLE PRECISION LAPACK linear equation routines +- xlintimd < dtime.in > $@ 2>&1 ++ ./xlintimd < dtime.in > $@ 2>&1 + + DTIME.out: DTIME.in xlintimd + @echo Timing square DOUBLE PRECISION LAPACK linear equation routines +- xlintimd < DTIME.in > $@ 2>&1 ++ ./xlintimd < DTIME.in > $@ 2>&1 + + dband.out: dband.in xlintimd + @echo Timing banded DOUBLE PRECISION LAPACK linear equation routines +- xlintimd < dband.in > $@ 2>&1 ++ ./xlintimd < dband.in > $@ 2>&1 + + DBAND.out: dband.in xlintimd + @echo Timing banded DOUBLE PRECISION LAPACK linear equation routines +- xlintimd < DBAND.in > $@ 2>&1 ++ ./xlintimd < DBAND.in > $@ 2>&1 + + dtime2.out: dtime2.in xlintimd + @echo Timing rectangular DOUBLE PRECISION LAPACK linear equation routines +- xlintimd < dtime2.in > $@ 2>&1 ++ ./xlintimd < dtime2.in > $@ 2>&1 + + DTIME2.out: DTIME2.in xlintimd + @echo Timing rectangular DOUBLE PRECISION LAPACK linear equation routines +- xlintimd < DTIME2.in > $@ 2>&1 ++ ./xlintimd < DTIME2.in > $@ 2>&1 + # + # ======== COMPLEX16 LIN TIMINGS ======================== + + ztime.out: ztime.in xlintimz + @echo Timing square 
COMPLEX16 LAPACK linear equation routines +- xlintimz < ztime.in > $@ 2>&1 ++ ./xlintimz < ztime.in > $@ 2>&1 + + ZTIME.out: ztime.in xlintimz + @echo Timing square COMPLEX16 LAPACK linear equation routines +- xlintimz < ZTIME.in > $@ 2>&1 ++ ./xlintimz < ZTIME.in > $@ 2>&1 + + zband.out: zband.in xlintimz + @echo Timing banded COMPLEX16 LAPACK linear equation routines +- xlintimz < zband.in > $@ 2>&1 ++ ./xlintimz < zband.in > $@ 2>&1 + + ZBAND.out: ZBAND.in xlintimz + @echo Timing banded COMPLEX16 LAPACK linear equation routines +- xlintimz < ZBAND.in > $@ 2>&1 ++ ./xlintimz < ZBAND.in > $@ 2>&1 + + ztime2.out: ztime2.in xlintimz + @echo Timing rectangular COMPLEX16 LAPACK linear equation routines +- xlintimz < ztime2.in > $@ 2>&1 ++ ./xlintimz < ztime2.in > $@ 2>&1 + + ZTIME2.out: ZTIME2.in xlintimz + @echo Timing rectangular COMPLEX16 LAPACK linear equation routines +- xlintimz < ZTIME2.in > $@ 2>&1 ++ ./xlintimz < ZTIME2.in > $@ 2>&1 + # + # + # ======== SINGLE EIG TIMINGS =========================== + # + sgeptim.out: sgeptim.in xeigtims + @echo GEP: Timing REAL Generalized Nonsymmetric Eigenvalue Problem routines +- xeigtims < sgeptim.in > $@ 2>&1 ++ ./xeigtims < sgeptim.in > $@ 2>&1 + + SGEPTIM.out: SGEPTIM.in xeigtims + @echo GEP: Timing REAL Generalized Nonsymmetric Eigenvalue Problem routines +- xeigtims < SGEPTIM.in > $@ 2>&1 ++ ./xeigtims < SGEPTIM.in > $@ 2>&1 + + sneptim.out: sneptim.in xeigtims + @echo NEP: Timing REAL Nonsymmetric Eigenvalue Problem routines +- xeigtims < sneptim.in > $@ 2>&1 ++ ./xeigtims < sneptim.in > $@ 2>&1 + + SNEPTIM.out: SNEPTIM.in xeigtims + @echo NEP: Timing REAL Nonsymmetric Eigenvalue Problem routines +- xeigtims < SNEPTIM.in > $@ 2>&1 ++ ./xeigtims < SNEPTIM.in > $@ 2>&1 + + sseptim.out: sseptim.in xeigtims + @echo SEP: Timing REAL Symmetric Eigenvalue Problem routines +- xeigtims < sseptim.in > $@ 2>&1 ++ ./xeigtims < sseptim.in > $@ 2>&1 + + SSEPTIM.out: SSEPTIM.in xeigtims + @echo SEP: Timing REAL Symmetric 
Eigenvalue Problem routines +- xeigtims < SSEPTIM.in > $@ 2>&1 ++ ./xeigtims < SSEPTIM.in > $@ 2>&1 + + ssvdtim.out: ssvdtim.in xeigtims + @echo SVD: Timing REAL Singular Value Decomposition routines +- xeigtims < ssvdtim.in > $@ 2>&1 ++ ./xeigtims < ssvdtim.in > $@ 2>&1 + + SSVDTIM.out: SSVDTIM.in xeigtims + @echo SVD: Timing REAL Singular Value Decomposition routines +- xeigtims < SSVDTIM.in > $@ 2>&1 ++ ./xeigtims < SSVDTIM.in > $@ 2>&1 + # + # ======== COMPLEX EIG TIMINGS =========================== + # + cgeptim.out: cgeptim.in xeigtimc + @echo GEP: Timing COMPLEX Generalized Nonsymmetric Eigenvalue Problem routines +- xeigtimc < cgeptim.in > $@ 2>&1 ++ ./xeigtimc < cgeptim.in > $@ 2>&1 + + CGEPTIM.out: CGEPTIM.in xeigtimc + @echo GEP: Timing COMPLEX Generalized Nonsymmetric Eigenvalue Problem routines +- xeigtimc < cgeptim.in > $@ 2>&1 ++ ./xeigtimc < cgeptim.in > $@ 2>&1 + + cneptim.out: cneptim.in xeigtimc + @echo NEP: Timing COMPLEX Nonsymmetric Eigenvalue Problem routines +- xeigtimc < cneptim.in > $@ 2>&1 ++ ./xeigtimc < cneptim.in > $@ 2>&1 + + CNEPTIM.out: CNEPTIM.in xeigtimc + @echo NEP: Timing COMPLEX Nonsymmetric Eigenvalue Problem routines +- xeigtimc < CNEPTIM.in > $@ 2>&1 ++ ./xeigtimc < CNEPTIM.in > $@ 2>&1 + + cseptim.out: cseptim.in xeigtimc + @echo SEP: Timing COMPLEX Symmetric Eigenvalue Problem routines +- xeigtimc < cseptim.in > $@ 2>&1 ++ ./xeigtimc < cseptim.in > $@ 2>&1 + + CSEPTIM.out: CSEPTIM.in xeigtimc + @echo SEP: Timing COMPLEX Symmetric Eigenvalue Problem routines +- xeigtimc < CSEPTIM.in > $@ 2>&1 ++ ./xeigtimc < CSEPTIM.in > $@ 2>&1 + + csvdtim.out: csvdtim.in xeigtimc + @echo SVD: Timing COMPLEX Singular Value Decomposition routines +- xeigtimc < csvdtim.in > $@ 2>&1 ++ ./xeigtimc < csvdtim.in > $@ 2>&1 + + CSVDTIM.out: CSVDTIM.in xeigtimc + @echo SVD: Timing COMPLEX Singular Value Decomposition routines +- xeigtimc < CSVDTIM.in > $@ 2>&1 ++ ./xeigtimc < CSVDTIM.in > $@ 2>&1 + # + # ======== DOUBLE EIG TIMINGS 
=========================== + # + dgeptim.out: dgeptim.in xeigtimd + @echo GEP: Timing DOUBLE PRECISION Generalized Nonsymmetric Eigenvalue Problem routines +- xeigtimd < dgeptim.in > $@ 2>&1 ++ ./xeigtimd < dgeptim.in > $@ 2>&1 + + DGEPTIM.out: DGEPTIM.in xeigtimd + @echo GEP: Timing DOUBLE PRECISION Generalized Nonsymmetric Eigenvalue Problem routines +- xeigtimd < dgeptim.in > $@ 2>&1 ++ ./xeigtimd < dgeptim.in > $@ 2>&1 + + dneptim.out: dneptim.in xeigtimd + @echo NEP: Timing DOUBLE PRECISION Nonsymmetric Eigenvalue Problem routines +- xeigtimd < dneptim.in > $@ 2>&1 ++ ./xeigtimd < dneptim.in > $@ 2>&1 + + DNEPTIM.out: DNEPTIM.in xeigtimd + @echo NEP: Timing DOUBLE PRECISION Nonsymmetric Eigenvalue Problem routines +- xeigtimd < DNEPTIM.in > $@ 2>&1 ++ ./xeigtimd < DNEPTIM.in > $@ 2>&1 + + dseptim.out: dseptim.in xeigtimd + @echo SEP: Timing DOUBLE PRECISION Symmetric Eigenvalue Problem routines +- xeigtimd < dseptim.in > $@ 2>&1 ++ ./xeigtimd < dseptim.in > $@ 2>&1 + + DSEPTIM.out: DSEPTIM.in xeigtimd + @echo SEP: Timing DOUBLE PRECISION Symmetric Eigenvalue Problem routines +- xeigtimd < DSEPTIM.in > $@ 2>&1 ++ ./xeigtimd < DSEPTIM.in > $@ 2>&1 + + dsvdtim.out: dsvdtim.in xeigtimd + @echo SVD: Timing DOUBLE PRECISION Singular Value Decomposition routines +- xeigtimd < dsvdtim.in > $@ 2>&1 ++ ./xeigtimd < dsvdtim.in > $@ 2>&1 + + DSVDTIM.out: DSVDTIM.in xeigtimd + @echo SVD: Timing DOUBLE PRECISION Singular Value Decomposition routines +- xeigtimd < DSVDTIM.in > $@ 2>&1 ++ ./xeigtimd < DSVDTIM.in > $@ 2>&1 + # + # ======== COMPLEX16 EIG TIMINGS =========================== + # + zgeptim.out: zgeptim.in xeigtimz + @echo GEP: Timing COMPLEX16 Generalized Nonsymmetric Eigenvalue Problem routines +- xeigtimz < zgeptim.in > $@ 2>&1 ++ ./xeigtimz < zgeptim.in > $@ 2>&1 + + ZGEPTIM.out: ZGEPTIM.in xeigtimz + @echo GEP: Timing COMPLEX16 Generalized Nonsymmetric Eigenvalue Problem routines +- xeigtimz < zgeptim.in > $@ 2>&1 ++ ./xeigtimz < zgeptim.in > $@ 2>&1 + + 
zneptim.out: zneptim.in xeigtimz + @echo NEP: Timing COMPLEX16 Nonsymmetric Eigenvalue Problem routines +- xeigtimz < zneptim.in > $@ 2>&1 ++ ./xeigtimz < zneptim.in > $@ 2>&1 + + ZNEPTIM.out: ZNEPTIM.in xeigtimz + @echo NEP: Timing COMPLEX16 Nonsymmetric Eigenvalue Problem routines +- xeigtimz < ZNEPTIM.in > $@ 2>&1 ++ ./xeigtimz < ZNEPTIM.in > $@ 2>&1 + + zseptim.out: zseptim.in xeigtimz + @echo SEP: Timing COMPLEX16 Symmetric Eigenvalue Problem routines +- xeigtimz < zseptim.in > $@ 2>&1 ++ ./xeigtimz < zseptim.in > $@ 2>&1 + + ZSEPTIM.out: ZSEPTIM.in xeigtimz + @echo SEP: Timing COMPLEX16 Symmetric Eigenvalue Problem routines +- xeigtimz < ZSEPTIM.in > $@ 2>&1 ++ ./xeigtimz < ZSEPTIM.in > $@ 2>&1 + + zsvdtim.out: zsvdtim.in xeigtimz + @echo SVD: Timing COMPLEX16 Singular Value Decomposition routines +- xeigtimz < zsvdtim.in > $@ 2>&1 ++ ./xeigtimz < zsvdtim.in > $@ 2>&1 + + ZSVDTIM.out: ZSVDTIM.in xeigtimz + @echo SVD: Timing COMPLEX16 Singular Value Decomposition routines +- xeigtimz < ZSVDTIM.in > $@ 2>&1 ++ ./xeigtimz < ZSVDTIM.in > $@ 2>&1 + # ============================================================================== + + xlintims: diff --git a/lapack-3.1.0-make.inc.patch b/lapack-3.1.0-make.inc.patch new file mode 100644 index 0000000..348f055 --- /dev/null +++ b/lapack-3.1.0-make.inc.patch @@ -0,0 +1,29 @@ +--- lapack-3.1.0/make.inc.BAD 2006-11-14 15:58:41.000000000 -0600 ++++ lapack-3.1.0/make.inc 2006-11-14 16:00:03.000000000 -0600 +@@ -10,7 +10,7 @@ + # + # The machine (platform) identifier to append to the library names + # +-PLAT = _LINUX ++PLAT = + # + # Modify the FORTRAN and OPTS definitions to refer to the + # compiler and desired compiler options for your machine. NOOPT +@@ -18,12 +18,13 @@ + # selected. Define LOADER and LOADOPTS to refer to the loader and + # desired load options for your machine. 
+ # +-FORTRAN = g77 +-OPTS = -funroll-all-loops -O3 -Wimplicit -Wno-globals -Wunused ++FORTRAN = gfortran ++#OPTS = -funroll-all-loops -O3 -Wimplicit -Wno-globals -Wunused + #OPTS = -O0 -g -Wall + DRVOPTS = $(OPTS) +-NOOPT = -Wimplicit -Wno-globals -Wunused +-LOADER = g77 ++# NOOPT = -Wimplicit -Wno-globals -Wunused ++NOOPT = ++LOADER = gfortran + LOADOPTS = + # + # The archiver and the flag(s) to use when building archive (library) diff --git a/lapack-3.1.0-no-external-etime.patch b/lapack-3.1.0-no-external-etime.patch new file mode 100644 index 0000000..5cbfc0e --- /dev/null +++ b/lapack-3.1.0-no-external-etime.patch @@ -0,0 +1,20 @@ +--- lapack-3.1.0/INSTALL/dsecnd.f.BAD 2007-01-05 20:35:09.000000000 -0600 ++++ lapack-3.1.0/INSTALL/dsecnd.f 2007-01-05 20:35:16.000000000 -0600 +@@ -20,7 +20,6 @@ + * .. + * .. External Functions .. + REAL ETIME +- EXTERNAL ETIME + * .. + * .. Executable Statements .. + * +--- lapack-3.1.0/INSTALL/second.f.BAD 2007-01-05 20:35:39.000000000 -0600 ++++ lapack-3.1.0/INSTALL/second.f 2007-01-05 20:35:45.000000000 -0600 +@@ -20,7 +20,6 @@ + * .. + * .. External Functions .. + REAL ETIME +- EXTERNAL ETIME + * .. + * .. Executable Statements .. + * diff --git a/lapack-3.1.1-make.inc.patch b/lapack-3.1.1-make.inc.patch new file mode 100644 index 0000000..c807ccb --- /dev/null +++ b/lapack-3.1.1-make.inc.patch @@ -0,0 +1,23 @@ +--- lapack-3.1.1/INSTALL/make.inc.gfortran.BAD 2007-05-25 15:34:55.000000000 -0500 ++++ lapack-3.1.1/INSTALL/make.inc.gfortran 2007-05-25 15:35:51.000000000 -0500 +@@ -8,7 +8,7 @@ SHELL = /bin/sh + # + # The machine (platform) identifier to append to the library names + # +-PLAT = _LINUX ++PLAT = + # + # Modify the FORTRAN and OPTS definitions to refer to the + # compiler and desired compiler options for your machine. NOOPT +@@ -17,9 +17,9 @@ PLAT = _LINUX + # desired load options for your machine. 
+ # + FORTRAN = gfortran +-OPTS = -O2 ++#OPTS = -O2 + DRVOPTS = $(OPTS) +-NOOPT = -O0 ++NOOPT = + LOADER = gfortran + LOADOPTS = + # diff --git a/lapack-3.4.0-make.inc.patch b/lapack-3.4.0-make.inc.patch new file mode 100644 index 0000000..2aaa092 --- /dev/null +++ b/lapack-3.4.0-make.inc.patch @@ -0,0 +1,15 @@ +diff -up lapack-3.4.0/INSTALL/make.inc.gfortran.orig lapack-3.4.0/INSTALL/make.inc.gfortran +--- lapack-3.4.0/INSTALL/make.inc.gfortran.orig 2011-11-11 16:08:04.000000000 -0500 ++++ lapack-3.4.0/INSTALL/make.inc.gfortran 2011-11-28 14:56:08.990672240 -0500 +@@ -13,9 +13,9 @@ SHELL = /bin/sh + # desired load options for your machine. + # + FORTRAN = gfortran +-OPTS = -O2 -frecursive ++#OPTS = -O2 -frecursive + DRVOPTS = $(OPTS) +-NOOPT = -O0 -frecursive ++NOOPT = + LOADER = gfortran + LOADOPTS = + # diff --git a/lapack-3.4.1-lapacke-disable-testing-functions.patch b/lapack-3.4.1-lapacke-disable-testing-functions.patch new file mode 100644 index 0000000..0fa2990 --- /dev/null +++ b/lapack-3.4.1-lapacke-disable-testing-functions.patch @@ -0,0 +1,12 @@ +diff -up lapack-3.4.1/LAPACKE/src/Makefile.disable-functions lapack-3.4.1/LAPACKE/src/Makefile +--- lapack-3.4.1/LAPACKE/src/Makefile.disable-functions 2012-10-24 11:19:46.054150834 -0400 ++++ lapack-3.4.1/LAPACKE/src/Makefile 2012-10-24 11:19:54.430152048 -0400 +@@ -2040,7 +2040,7 @@ lapacke_slagsy_work.o \ + lapacke_zlagsy.o \ + lapacke_zlagsy_work.o + +-ALLOBJ = $(SRC_OBJ) $(MATGEN_OBJ) ++ALLOBJ = $(SRC_OBJ) + + ifdef USEXBLAS + ALLXOBJ = $(SXLASRC) $(DXLASRC) $(CXLASRC) $(ZXLASRC) diff --git a/lapack-3.4.1-lapacke-shared.patch b/lapack-3.4.1-lapacke-shared.patch new file mode 100644 index 0000000..8cdcb86 --- /dev/null +++ b/lapack-3.4.1-lapacke-shared.patch @@ -0,0 +1,21 @@ +diff -up lapack-3.4.1/INSTALL/make.inc.gfortran.shared lapack-3.4.1/INSTALL/make.inc.gfortran +--- lapack-3.4.1/INSTALL/make.inc.gfortran.shared 2012-09-06 21:55:01.659280432 -0600 ++++ lapack-3.4.1/INSTALL/make.inc.gfortran 2012-09-06 
22:04:04.936732440 -0600 +@@ -73,3 +73,4 @@ BLASLIB = ../../librefblas.a + LAPACKLIB = liblapack.a + TMGLIB = libtmglib.a + LAPACKELIB = liblapacke.a ++SHLIB_LAPACKE = liblapacke.so +diff -up lapack-3.4.1/LAPACKE/Makefile.shared lapack-3.4.1/LAPACKE/Makefile +--- lapack-3.4.1/LAPACKE/Makefile.shared 2012-04-12 17:05:47.000000000 -0600 ++++ lapack-3.4.1/LAPACKE/Makefile 2012-09-06 21:55:01.851271802 -0600 +@@ -49,6 +49,9 @@ lapacke: + cd src && $(MAKE) + cd utils && $(MAKE) + ++shlib: lapacke ++ $(CC) $(CFLAGS) -shared -Wl,-soname,$(SHLIB_LAPACKE).@SHORTVER@ -o $(SHLIB_LAPACKE).@LONGVER@ src/*.o utils/*.o $(LIBS) -lgfortran -lc ++ + lapacke_example: + cd example && $(MAKE) + diff --git a/lapack-3.5.0-R-blas-fixes.patch b/lapack-3.5.0-R-blas-fixes.patch new file mode 100644 index 0000000..178142d --- /dev/null +++ b/lapack-3.5.0-R-blas-fixes.patch @@ -0,0 +1,99 @@ +diff -up lapack-3.5.0/BLAS/SRC/dgbmv.f.R lapack-3.5.0/BLAS/SRC/dgbmv.f +--- lapack-3.5.0/BLAS/SRC/dgbmv.f.R 2011-11-03 16:32:56.000000000 -0400 ++++ lapack-3.5.0/BLAS/SRC/dgbmv.f 2014-07-07 11:50:48.257175853 -0400 +@@ -312,18 +312,18 @@ + JX = KX + IF (INCY.EQ.1) THEN + DO 60 J = 1,N +- IF (X(JX).NE.ZERO) THEN ++c IF (X(JX).NE.ZERO) THEN + TEMP = ALPHA*X(JX) + K = KUP1 - J + DO 50 I = MAX(1,J-KU),MIN(M,J+KL) + Y(I) = Y(I) + TEMP*A(K+I,J) + 50 CONTINUE +- END IF ++c END IF + JX = JX + INCX + 60 CONTINUE + ELSE + DO 80 J = 1,N +- IF (X(JX).NE.ZERO) THEN ++c IF (X(JX).NE.ZERO) THEN + TEMP = ALPHA*X(JX) + IY = KY + K = KUP1 - J +@@ -331,7 +331,7 @@ + Y(IY) = Y(IY) + TEMP*A(K+I,J) + IY = IY + INCY + 70 CONTINUE +- END IF ++c END IF + JX = JX + INCX + IF (J.GT.KU) KY = KY + INCY + 80 CONTINUE +diff -up lapack-3.5.0/BLAS/SRC/dgemm.f.R lapack-3.5.0/BLAS/SRC/dgemm.f +--- lapack-3.5.0/BLAS/SRC/dgemm.f.R 2011-11-03 16:32:56.000000000 -0400 ++++ lapack-3.5.0/BLAS/SRC/dgemm.f 2014-07-07 11:50:48.257175853 -0400 +@@ -311,12 +311,12 @@ + 60 CONTINUE + END IF + DO 80 L = 1,K +- IF (B(L,J).NE.ZERO) THEN ++c IF 
(B(L,J).NE.ZERO) THEN + TEMP = ALPHA*B(L,J) + DO 70 I = 1,M + C(I,J) = C(I,J) + TEMP*A(I,L) + 70 CONTINUE +- END IF ++c END IF + 80 CONTINUE + 90 CONTINUE + ELSE +@@ -353,12 +353,12 @@ + 140 CONTINUE + END IF + DO 160 L = 1,K +- IF (B(J,L).NE.ZERO) THEN ++c IF (B(J,L).NE.ZERO) THEN + TEMP = ALPHA*B(J,L) + DO 150 I = 1,M + C(I,J) = C(I,J) + TEMP*A(I,L) + 150 CONTINUE +- END IF ++c END IF + 160 CONTINUE + 170 CONTINUE + ELSE +diff -up lapack-3.5.0/BLAS/SRC/dgemv.f.R lapack-3.5.0/BLAS/SRC/dgemv.f +--- lapack-3.5.0/BLAS/SRC/dgemv.f.R 2011-11-03 16:32:56.000000000 -0400 ++++ lapack-3.5.0/BLAS/SRC/dgemv.f 2014-07-07 11:50:48.257175853 -0400 +@@ -278,24 +278,24 @@ + JX = KX + IF (INCY.EQ.1) THEN + DO 60 J = 1,N +- IF (X(JX).NE.ZERO) THEN ++c IF (X(JX).NE.ZERO) THEN + TEMP = ALPHA*X(JX) + DO 50 I = 1,M + Y(I) = Y(I) + TEMP*A(I,J) + 50 CONTINUE +- END IF ++c END IF + JX = JX + INCX + 60 CONTINUE + ELSE + DO 80 J = 1,N +- IF (X(JX).NE.ZERO) THEN ++c IF (X(JX).NE.ZERO) THEN + TEMP = ALPHA*X(JX) + IY = KY + DO 70 I = 1,M + Y(IY) = Y(IY) + TEMP*A(I,J) + IY = IY + INCY + 70 CONTINUE +- END IF ++c END IF + JX = JX + INCX + 80 CONTINUE + END IF diff --git a/lapack-3.5.0-lapacke-matgenobj.patch b/lapack-3.5.0-lapacke-matgenobj.patch new file mode 100644 index 0000000..fc0f21b --- /dev/null +++ b/lapack-3.5.0-lapacke-matgenobj.patch @@ -0,0 +1,12 @@ +diff -up lapack-3.5.0/LAPACKE/src/Makefile.matgenobj lapack-3.5.0/LAPACKE/src/Makefile +--- lapack-3.5.0/LAPACKE/src/Makefile.matgenobj 2014-06-17 13:21:30.336206692 -0400 ++++ lapack-3.5.0/LAPACKE/src/Makefile 2014-06-17 13:22:31.701885152 -0400 +@@ -2061,7 +2061,7 @@ lapacke_slagsy_work.o \ + lapacke_zlagsy.o \ + lapacke_zlagsy_work.o + +-ALLOBJ = $(SRC_OBJ) ++ALLOBJ = $(SRC_OBJ) $(MATGEN_OBJ) + + ifdef USEXBLAS + ALLXOBJ = $(SXLASRC) $(DXLASRC) $(CXLASRC) $(ZXLASRC) diff --git a/lapack-3.5.0-lapacke-tmglib.patch b/lapack-3.5.0-lapacke-tmglib.patch new file mode 100644 index 0000000..f316ddd --- /dev/null +++ 
b/lapack-3.5.0-lapacke-tmglib.patch @@ -0,0 +1,36 @@ +diff -up lapack-3.5.0/LAPACKE/Makefile.tmglib lapack-3.5.0/LAPACKE/Makefile +--- lapack-3.5.0/LAPACKE/Makefile.tmglib 2014-06-18 14:21:50.057725467 -0400 ++++ lapack-3.5.0/LAPACKE/Makefile 2014-06-18 14:22:29.360436295 -0400 +@@ -50,7 +50,7 @@ lapacke: + cd utils && $(MAKE) + + shlib: lapacke +- $(CC) $(CFLAGS) -shared -Wl,-soname,$(SHLIB_LAPACKE).@SHORTVER@ -o $(SHLIB_LAPACKE).@LONGVER@ src/*.o utils/*.o $(LIBS) -lgfortran -lc ++ $(CC) $(CFLAGS) -shared -Wl,-soname,$(SHLIB_LAPACKE).@SHORTVER@ -o $(SHLIB_LAPACKE).@LONGVER@ src/*.o utils/*.o $(LIBS) -lgfortran -lc ../TESTING/MATGEN/*.o + + lapacke_example: + cd example && $(MAKE) +diff -up lapack-3.5.0/LAPACKE/src/Makefile.tmglib lapack-3.5.0/LAPACKE/src/Makefile +--- lapack-3.5.0/LAPACKE/src/Makefile.tmglib 2014-06-18 14:21:50.056725474 -0400 ++++ lapack-3.5.0/LAPACKE/src/Makefile 2014-06-18 14:21:50.057725467 -0400 +@@ -2073,7 +2073,7 @@ OBJ_FILES := $(C_FILES:.o=.o) + all: ../../$(LAPACKELIB) + + ../../$(LAPACKELIB): $(ALLOBJ) $(ALLXOBJ) +- $(ARCH) $(ARCHFLAGS) ../../$(LAPACKELIB) $(ALLOBJ) $(ALLXOBJ) ++ $(ARCH) $(ARCHFLAGS) ../../$(LAPACKELIB) $(ALLOBJ) $(ALLXOBJ) ../../TESTING/MATGEN/*.o + $(RANLIB) ../../$(LAPACKELIB) + + .c.o: +diff -up lapack-3.5.0/Makefile.tmglib lapack-3.5.0/Makefile +--- lapack-3.5.0/Makefile.tmglib 2012-04-13 14:22:32.000000000 -0400 ++++ lapack-3.5.0/Makefile 2014-06-18 14:21:50.058725460 -0400 +@@ -23,7 +23,7 @@ blaslib: + lapacklib: lapack_install + ( cd SRC; $(MAKE) ) + +-lapackelib: lapacklib ++lapackelib: tmglib lapacklib + ( cd lapacke; $(MAKE) ) + + lapacke_example: lapackelib diff --git a/lapack-3.6.0-lapacke-tmglib.patch b/lapack-3.6.0-lapacke-tmglib.patch new file mode 100644 index 0000000..e9cf018 --- /dev/null +++ b/lapack-3.6.0-lapacke-tmglib.patch @@ -0,0 +1,36 @@ +diff -up lapack-3.6.0/LAPACKE/Makefile.tmglib lapack-3.6.0/LAPACKE/Makefile +--- lapack-3.6.0/LAPACKE/Makefile.tmglib 2015-12-02 10:01:15.639679177 -0500 
++++ lapack-3.6.0/LAPACKE/Makefile 2015-12-02 10:01:15.660679035 -0500 +@@ -50,7 +50,7 @@ lapacke: + cd utils && $(MAKE) + + shlib: lapacke +- $(CC) $(CFLAGS) -shared -Wl,-soname,$(SHLIB_LAPACKE).@SHORTVER@ -o $(SHLIB_LAPACKE).@LONGVER@ src/*.o utils/*.o $(LIBS) -lgfortran -lc ++ $(CC) $(CFLAGS) -shared -Wl,-soname,$(SHLIB_LAPACKE).@SHORTVER@ -o $(SHLIB_LAPACKE).@LONGVER@ src/*.o utils/*.o $(LIBS) -lgfortran -lc ../TESTING/MATGEN/*.o + + lapacke_example: + cd example && $(MAKE) +diff -up lapack-3.6.0/LAPACKE/src/Makefile.tmglib lapack-3.6.0/LAPACKE/src/Makefile +--- lapack-3.6.0/LAPACKE/src/Makefile.tmglib 2015-12-03 12:18:04.216900463 -0500 ++++ lapack-3.6.0/LAPACKE/src/Makefile 2015-12-03 12:18:15.638838677 -0500 +@@ -2204,7 +2204,7 @@ OBJ_FILES := $(C_FILES:.o=.o) + all: ../../$(LAPACKELIB) + + ../../$(LAPACKELIB): $(ALLOBJ) $(ALLXOBJ) $(DEPRECATED) +- $(ARCH) $(ARCHFLAGS) ../../$(LAPACKELIB) $(ALLOBJ) $(ALLXOBJ) $(DEPRECATED) ++ $(ARCH) $(ARCHFLAGS) ../../$(LAPACKELIB) $(ALLOBJ) $(ALLXOBJ) $(DEPRECATED) ../../TESTING/MATGEN/*.o + $(RANLIB) ../../$(LAPACKELIB) + + .c.o: +diff -up lapack-3.6.0/Makefile.tmglib lapack-3.6.0/Makefile +--- lapack-3.6.0/Makefile.tmglib 2015-11-01 11:21:47.000000000 -0500 ++++ lapack-3.6.0/Makefile 2015-12-02 10:01:15.647679123 -0500 +@@ -26,7 +26,7 @@ cblaslib: + lapacklib: lapack_install + ( cd SRC; $(MAKE) ) + +-lapackelib: lapacklib ++lapackelib: tmglib lapacklib + ( cd LAPACKE; $(MAKE) ) + + cblas_example: cblaslib blaslib diff --git a/lapack-3.6.0-make.inc.patch b/lapack-3.6.0-make.inc.patch new file mode 100644 index 0000000..ef26a58 --- /dev/null +++ b/lapack-3.6.0-make.inc.patch @@ -0,0 +1,15 @@ +diff -up lapack-3.6.0/INSTALL/make.inc.gfortran.fedora lapack-3.6.0/INSTALL/make.inc.gfortran +--- lapack-3.6.0/INSTALL/make.inc.gfortran.fedora 2015-11-17 11:25:02.789918460 -0500 ++++ lapack-3.6.0/INSTALL/make.inc.gfortran 2015-11-17 11:26:45.806115209 -0500 +@@ -17,9 +17,9 @@ SHELL = /bin/sh + # should not compile LAPACK with flags 
such as -ffpe-trap=overflow. + # + FORTRAN = gfortran +-OPTS = -O2 -frecursive ++#OPTS = -O2 -frecursive + DRVOPTS = $(OPTS) +-NOOPT = -O0 -frecursive ++NOOPT = + LOADER = gfortran + LOADOPTS = + # diff --git a/lapack-3.7.1-lapacke-shared.patch b/lapack-3.7.1-lapacke-shared.patch new file mode 100644 index 0000000..91be631 --- /dev/null +++ b/lapack-3.7.1-lapacke-shared.patch @@ -0,0 +1,21 @@ +diff -up lapack-3.7.1/INSTALL/make.inc.gfortran.shared lapack-3.7.1/INSTALL/make.inc.gfortran +--- lapack-3.7.1/INSTALL/make.inc.gfortran.shared 2017-08-01 11:46:10.665067382 -0400 ++++ lapack-3.7.1/INSTALL/make.inc.gfortran 2017-08-01 11:46:10.667067346 -0400 +@@ -83,3 +83,4 @@ CBLASLIB = ../../libcblas.a + LAPACKLIB = liblapack.a + TMGLIB = libtmglib.a + LAPACKELIB = liblapacke.a ++SHLIB_LAPACKE = liblapacke.so +diff -up lapack-3.7.1/LAPACKE/Makefile.shared lapack-3.7.1/LAPACKE/Makefile +--- lapack-3.7.1/LAPACKE/Makefile.shared 2017-08-01 11:46:10.668067328 -0400 ++++ lapack-3.7.1/LAPACKE/Makefile 2017-08-01 11:47:23.649755395 -0400 +@@ -51,6 +51,9 @@ lapacke: include/lapacke_mangling.h + include/lapacke_mangling.h: include/lapacke_mangling_with_flags.h.in + cp $< $@ + ++shlib: lapacke ++ $(CC) $(CFLAGS) -shared -Wl,-soname,$(SHLIB_LAPACKE).@SHORTVER@ -o $(SHLIB_LAPACKE).@LONGVER@ src/*.o utils/*.o $(LDFLAGS) $(LIBS) -lgfortran -lc -L.. 
-llapack ++ + lapacke_example: lapacke + $(MAKE) -C example + diff --git a/lapack-3.7.1-lapacke-tmglib.patch b/lapack-3.7.1-lapacke-tmglib.patch new file mode 100644 index 0000000..226e49f --- /dev/null +++ b/lapack-3.7.1-lapacke-tmglib.patch @@ -0,0 +1,25 @@ +diff -up lapack-3.7.1/LAPACKE/Makefile.tmglib lapack-3.7.1/LAPACKE/Makefile +--- lapack-3.7.1/LAPACKE/Makefile.tmglib 2017-08-01 11:49:38.704327630 -0400 ++++ lapack-3.7.1/LAPACKE/Makefile 2017-08-01 11:50:51.487019275 -0400 +@@ -52,7 +52,7 @@ include/lapacke_mangling.h: include/lapa + cp $< $@ + + shlib: lapacke +- $(CC) $(CFLAGS) -shared -Wl,-soname,$(SHLIB_LAPACKE).@SHORTVER@ -o $(SHLIB_LAPACKE).@LONGVER@ src/*.o utils/*.o $(LDFLAGS) $(LIBS) -lgfortran -lc -L.. -llapack ++ $(CC) $(CFLAGS) -shared -Wl,-soname,$(SHLIB_LAPACKE).@SHORTVER@ -o $(SHLIB_LAPACKE).@LONGVER@ src/*.o utils/*.o $(LDFLAGS) $(LIBS) -lgfortran -lc -L.. -llapack ../TESTING/MATGEN/*.o + + lapacke_example: lapacke + $(MAKE) -C example +diff -up lapack-3.7.1/LAPACKE/src/Makefile.tmglib lapack-3.7.1/LAPACKE/src/Makefile +diff -up lapack-3.7.1/Makefile.tmglib lapack-3.7.1/Makefile +--- lapack-3.7.1/Makefile.tmglib 2017-08-01 11:49:38.705327612 -0400 ++++ lapack-3.7.1/Makefile 2017-08-01 11:52:11.149587246 -0400 +@@ -20,7 +20,7 @@ cblaslib: + lapacklib: + $(MAKE) -C SRC + +-lapackelib: ++lapackelib: tmglib + $(MAKE) -C LAPACKE + + tmglib: diff --git a/lapack-3.7.1-make.inc.patch b/lapack-3.7.1-make.inc.patch new file mode 100644 index 0000000..20071f6 --- /dev/null +++ b/lapack-3.7.1-make.inc.patch @@ -0,0 +1,15 @@ +diff -up lapack-3.7.1/INSTALL/make.inc.gfortran.fedora lapack-3.7.1/INSTALL/make.inc.gfortran +--- lapack-3.7.1/INSTALL/make.inc.gfortran.fedora 2017-08-01 11:44:51.631843862 -0400 ++++ lapack-3.7.1/INSTALL/make.inc.gfortran 2017-08-01 11:45:32.001809404 -0400 +@@ -20,9 +20,9 @@ CFLAGS = -O3 + # should not compile LAPACK with flags such as -ffpe-trap=overflow. 
+ # + FORTRAN = gfortran +-OPTS = -O2 -frecursive ++# OPTS = -O2 -frecursive + DRVOPTS = $(OPTS) +-NOOPT = -O0 -frecursive ++NOOPT = + + # Define LOADER and LOADOPTS to refer to the loader and desired + # load options for your machine. diff --git a/lapack-3.8.0-missing-aawork.patch b/lapack-3.8.0-missing-aawork.patch new file mode 100644 index 0000000..1dcbbe1 --- /dev/null +++ b/lapack-3.8.0-missing-aawork.patch @@ -0,0 +1,19 @@ +diff -up lapack-3.8.0/LAPACKE/src/Makefile.missing-aawork lapack-3.8.0/LAPACKE/src/Makefile +--- lapack-3.8.0/LAPACKE/src/Makefile.missing-aawork 2018-02-26 15:44:06.754611865 -0500 ++++ lapack-3.8.0/LAPACKE/src/Makefile 2018-02-26 15:44:39.925820302 -0500 +@@ -1101,6 +1101,7 @@ lapacke_dsytrf_work.o \ + lapacke_dsytrf_rook.o \ + lapacke_dsytrf_rook_work.o \ + lapacke_dsytrf_aa.o \ ++lapacke_dsytrf_aa_work.o \ + lapacke_dsytrf_aa_2stage.o \ + lapacke_dsytrf_aa_2stage_work.o \ + lapacke_dsytrf_rk.o \ +@@ -1661,6 +1662,7 @@ lapacke_ssytrf_work.o \ + lapacke_ssytrf_rook.o \ + lapacke_ssytrf_rook_work.o \ + lapacke_ssytrf_aa.o \ ++lapacke_ssytrf_aa_work.o \ + lapacke_ssytrf_aa_2stage.o \ + lapacke_ssytrf_aa_2stage_work.o \ + lapacke_ssytrf_rk.o \ diff --git a/lapack-3.9.0-lapacke-shared.patch b/lapack-3.9.0-lapacke-shared.patch new file mode 100644 index 0000000..2cfd668 --- /dev/null +++ b/lapack-3.9.0-lapacke-shared.patch @@ -0,0 +1,21 @@ +diff -up lapack-3.9.0/INSTALL/make.inc.gfortran.shared lapack-3.9.0/INSTALL/make.inc.gfortran +--- lapack-3.9.0/INSTALL/make.inc.gfortran.shared 2019-11-26 09:19:38.689118934 -0500 ++++ lapack-3.9.0/INSTALL/make.inc.gfortran 2019-11-26 09:20:52.442628214 -0500 +@@ -81,3 +81,4 @@ CBLASLIB = $(TOPSRCDIR)/libcblas.a + LAPACKLIB = $(TOPSRCDIR)/liblapack.a + TMGLIB = $(TOPSRCDIR)/libtmglib.a + LAPACKELIB = $(TOPSRCDIR)/liblapacke.a ++SHLIB_LAPACKE = $(TOPSRCDIR)/liblapacke.so +diff -up lapack-3.9.0/LAPACKE/Makefile.shared lapack-3.9.0/LAPACKE/Makefile +--- lapack-3.9.0/LAPACKE/Makefile.shared 2019-11-26 
09:19:38.689118934 -0500 ++++ lapack-3.9.0/LAPACKE/Makefile 2019-11-26 09:21:59.977268139 -0500 +@@ -54,6 +54,9 @@ lapacke: include/lapacke_mangling.h + include/lapacke_mangling.h: include/lapacke_mangling_with_flags.h.in + cp include/lapacke_mangling_with_flags.h.in $@ + ++shlib: lapacke ++ $(CC) $(CFLAGS) -shared -Wl,-soname,liblapacke.so.@SHORTVER@ -o $(SHLIB_LAPACKE).@LONGVER@ src/*.o utils/*.o $(LDFLAGS) $(LIBS) -lgfortran -lc -L.. -llapack ++ + .PHONY: lapacke_example + lapacke_example: lapacke + $(MAKE) -C example diff --git a/lapack-3.9.0-lapacke-tmglib.patch b/lapack-3.9.0-lapacke-tmglib.patch new file mode 100644 index 0000000..0dbe50f --- /dev/null +++ b/lapack-3.9.0-lapacke-tmglib.patch @@ -0,0 +1,24 @@ +diff -up lapack-3.9.0/LAPACKE/Makefile.tmglib lapack-3.9.0/LAPACKE/Makefile +--- lapack-3.9.0/LAPACKE/Makefile.tmglib 2019-11-26 09:22:35.406554632 -0500 ++++ lapack-3.9.0/LAPACKE/Makefile 2019-11-26 09:24:06.927711490 -0500 +@@ -55,7 +55,7 @@ include/lapacke_mangling.h: include/lapa + cp include/lapacke_mangling_with_flags.h.in $@ + + shlib: lapacke +- $(CC) $(CFLAGS) -shared -Wl,-soname,liblapacke.so.@SHORTVER@ -o $(SHLIB_LAPACKE).@LONGVER@ src/*.o utils/*.o $(LDFLAGS) $(LIBS) -lgfortran -lc -L.. -llapack ++ $(CC) $(CFLAGS) -shared -Wl,-soname,liblapacke.so.@SHORTVER@ -o $(SHLIB_LAPACKE).@LONGVER@ src/*.o utils/*.o $(LDFLAGS) $(LIBS) -lgfortran -lc -L.. 
-llapack ../TESTING/MATGEN/*.o + + .PHONY: lapacke_example + lapacke_example: lapacke +diff -up lapack-3.9.0/Makefile.tmglib lapack-3.9.0/Makefile +--- lapack-3.9.0/Makefile.tmglib 2019-11-26 09:22:35.408554591 -0500 ++++ lapack-3.9.0/Makefile 2019-11-26 09:48:23.173944899 -0500 +@@ -27,7 +27,7 @@ lapacklib: + $(MAKE) -C SRC + + .PHONY: lapackelib +-lapackelib: ++lapackelib: tmglib + $(MAKE) -C LAPACKE + + .PHONY: blaspplib diff --git a/lapack-3.9.0-make.inc.patch b/lapack-3.9.0-make.inc.patch new file mode 100644 index 0000000..401dcfa --- /dev/null +++ b/lapack-3.9.0-make.inc.patch @@ -0,0 +1,15 @@ +diff -up lapack-3.9.0/INSTALL/make.inc.gfortran.fedora lapack-3.9.0/INSTALL/make.inc.gfortran +--- lapack-3.9.0/INSTALL/make.inc.gfortran.fedora 2019-11-26 09:18:42.671256080 -0500 ++++ lapack-3.9.0/INSTALL/make.inc.gfortran 2019-11-26 09:19:09.296715591 -0500 +@@ -20,9 +20,9 @@ CFLAGS = -O3 + # should not compile LAPACK with flags such as -ffpe-trap=overflow. + # + FC = gfortran +-FFLAGS = -O2 -frecursive ++FFLAGS = + FFLAGS_DRV = $(FFLAGS) +-FFLAGS_NOOPT = -O0 -frecursive ++FFLAGS_NOOPT = + + # Define LDFLAGS to the desired linker options for your machine. 
+ # diff --git a/lapack-fixlwork.patch b/lapack-fixlwork.patch new file mode 100644 index 0000000..27759f0 --- /dev/null +++ b/lapack-fixlwork.patch @@ -0,0 +1,40 @@ +--- LAPACK/SRC/dgesdd.f.BAD 2005-10-06 22:53:05.000000000 -0500 ++++ LAPACK/SRC/dgesdd.f 2005-10-06 22:55:17.000000000 -0500 +@@ -294,6 +294,8 @@ + * + WRKBL = 3*N + ( M+N )*ILAENV( 1, 'DGEBRD', ' ', M, N, -1, + $ -1 ) ++ MAXWRK = MAX( WRKBL, BDSPAC ) ++ MINWRK = BDSPAC + IF( WNTQO ) THEN + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'DORMBR', 'QLN', M, N, N, -1 ) ) +@@ -390,6 +392,8 @@ + * + WRKBL = 3*M + ( M+N )*ILAENV( 1, 'DGEBRD', ' ', M, N, -1, + $ -1 ) ++ MAXWRK = MAX( WRKBL, BDSPAC ) ++ MINWRK = BDSPAC + IF( WNTQO ) THEN + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'DORMBR', 'QLN', M, M, N, -1 ) ) +--- LAPACK/SRC/sgesdd.f.BAD 2005-10-06 22:55:41.000000000 -0500 ++++ LAPACK/SRC/sgesdd.f 2005-10-06 22:56:50.000000000 -0500 +@@ -294,6 +294,8 @@ + * + WRKBL = 3*N + ( M+N )*ILAENV( 1, 'SGEBRD', ' ', M, N, -1, + $ -1 ) ++ MAXWRK = MAX( WRKBL, BDSPAC ) ++ MINWRK = BDSPAC + IF( WNTQO ) THEN + WRKBL = MAX( WRKBL, 3*N+N* + $ ILAENV( 1, 'SORMBR', 'QLN', M, N, N, -1 ) ) +@@ -390,6 +392,8 @@ + * + WRKBL = 3*M + ( M+N )*ILAENV( 1, 'SGEBRD', ' ', M, N, -1, + $ -1 ) ++ MAXWRK = MAX( WRKBL, BDSPAC ) ++ MINWRK = BDSPAC + IF( WNTQO ) THEN + WRKBL = MAX( WRKBL, 3*M+M* + $ ILAENV( 1, 'SORMBR', 'QLN', M, M, N, -1 ) ) diff --git a/lapack.spec b/lapack.spec new file mode 100644 index 0000000..2717727 --- /dev/null +++ b/lapack.spec @@ -0,0 +1,1015 @@ +%global shortver 3 +%global mediumver %{shortver}.9 + +%if %{?__isa_bits:%{__isa_bits}}%{!?__isa_bits:32} == 64 +%global arch64 1 +%else +%global arch64 0 +%endif + +Summary: Numerical linear algebra package libraries +Name: lapack +Version: %{mediumver}.0 +Release: 5%{?dist} +License: BSD +URL: http://www.netlib.org/lapack/ +Source0: https://github.com/Reference-LAPACK/lapack/archive/v%{version}.tar.gz +Source1: http://www.netlib.org/lapack/manpages.tgz +Source2: 
Makefile.blas +Source3: Makefile.lapack +Source4: http://www.netlib.org/lapack/lapackqref.ps +Source5: http://www.netlib.org/blas/blasqr.ps +Source6: Makefile.cblas +Patch3: lapack-3.9.0-make.inc.patch +Patch4: lapack-3.9.0-lapacke-shared.patch +Patch5: lapack-3.4.1-lapacke-disable-testing-functions.patch +Patch6: lapack-3.5.0-lapacke-matgenobj.patch +Patch7: lapack-3.9.0-lapacke-tmglib.patch +# Bugzilla 1814756 +Patch8: https://github.com/Reference-LAPACK/lapack/commit/87536aa3c8bb0af00f66088fb6ac05d87509e011.patch +BuildRequires: gcc-gfortran, gawk +Requires: blas%{?_isa} = %{version}-%{release} + +%global _description_lapack %{expand: +LAPACK (Linear Algebra PACKage) is a standard library for numerical +linear algebra. LAPACK provides routines for solving systems of +simultaneous linear equations, least-squares solutions of linear +systems of equations, eigenvalue problems, and singular value +problems. Associated matrix factorizations (LU, Cholesky, QR, SVD, +Schur, and generalized Schur) and related computations (i.e., +reordering of Schur factorizations and estimating condition numbers) +are also included. LAPACK can handle dense and banded matrices, but +not general sparse matrices. Similar functionality is provided for +real and complex matrices in both single and double precision. LAPACK +is coded in Fortran90 and built with gcc. +} + +%global _description_blas %{expand: +BLAS (Basic Linear Algebra Subprograms) is a standard library which +provides a number of basic algorithms for numerical algebra. +} + +%description %_description_lapack + +%package devel +Summary: LAPACK development libraries +Requires: %{name}%{?_isa} = %{version}-%{release} +Requires: blas-devel%{?_isa} = %{version}-%{release} + +%description devel +LAPACK development libraries (shared). + +%package static +Summary: LAPACK static libraries +Requires: lapack-devel%{?_isa} = %{version}-%{release} + +%description static +LAPACK static libraries. 
+ +%package -n blas +Summary: The Basic Linear Algebra Subprograms library + +%description -n blas %_description_blas + +%package -n blas-devel +Summary: BLAS development libraries +Requires: blas%{?_isa} = %{version}-%{release} +Requires: gcc-gfortran + +%description -n blas-devel +BLAS development libraries (shared). + +%package -n blas-static +Summary: BLAS static libraries +Requires: blas-devel%{?_isa} = %{version}-%{release} + +%description -n blas-static +BLAS static libraries. + +%if 0%{?arch64} +%package -n lapack64 +Summary: Numerical linear algebra package libraries +Requires: blas64%{?_isa} = %{version}-%{release} + +%description -n lapack64 %_description_lapack +This build has 64bit INTEGER support. + +%package -n blas64 +Summary: The Basic Linear Algebra Subprograms library (64bit INTEGER) + +%description -n blas64 %_description_blas +This build has 64bit INTEGER support. + +%package -n lapack64_ +Summary: Numerical linear algebra package libraries +Requires: blas64_%{?_isa} = %{version}-%{release} + +%description -n lapack64_ %_description_lapack +This build has 64bit INTEGER support and a symbol name suffix. + +%package -n blas64_ +Summary: The Basic Linear Algebra Subprograms library (64bit INTEGER) + +%description -n blas64_ %_description_blas +This build has 64bit INTEGER support and a symbol name suffix. 
+%endif + +%prep +%setup -q +%setup -q -D -T -a1 +%patch3 -p1 -b .fedora +%patch4 -p1 -b .shared +# %patch5 -p1 -b .disable-functions +# %patch6 -p1 -b .matgenobj +%patch7 -p1 -b .tmglib +%patch8 -p1 -b .bz1814756 + +mkdir manpages +mv man/ manpages/ + +cp -f INSTALL/make.inc.gfortran make.inc +cp -f %{SOURCE2} BLAS/SRC/Makefile +cp -f %{SOURCE3} SRC/Makefile +cp -f %{SOURCE6} CBLAS/src/Makefile + +sed -i "s|@SHORTVER@|%{shortver}|g" BLAS/SRC/Makefile +sed -i "s|@SHORTVER@|%{shortver}|g" SRC/Makefile +sed -i "s|@SHORTVER@|%{shortver}|g" LAPACKE/Makefile +sed -i "s|@SHORTVER@|%{shortver}|g" CBLAS/src/Makefile +sed -i "s|@LONGVER@|%{version}|g" BLAS/SRC/Makefile +sed -i "s|@LONGVER@|%{version}|g" SRC/Makefile +sed -i "s|@LONGVER@|%{version}|g" LAPACKE/Makefile +sed -i "s|@LONGVER@|%{version}|g" CBLAS/src/Makefile + +%build +RPM_OPT_FLAGS="$RPM_OPT_FLAGS -frecursive --no-optimize-sibling-calls" +RPM_OPT_O_FLAGS=$(echo $RPM_OPT_FLAGS | sed 's|-O2|-O0|') +export FC=gfortran + +# Build BLAS +pushd BLAS/SRC +FFLAGS="$RPM_OPT_O_FLAGS" make dcabs1.o +FFLAGS="$RPM_OPT_FLAGS" CFLAGS="$RPM_OPT_FLAGS" make static +cp libblas.a ${RPM_BUILD_DIR}/%{name}-%{version}/ +make clean +FFLAGS="$RPM_OPT_O_FLAGS -fPIC" make dcabs1.o +FFLAGS="$RPM_OPT_FLAGS -fPIC" CFLAGS="$RPM_OPT_FLAGS -fPIC" LDFLAGS="%{build_ldflags}" make shared +cp libblas.so.%{version} ${RPM_BUILD_DIR}/%{name}-%{version}/ +%if 0%{?arch64} +make clean +FFLAGS="$RPM_OPT_O_FLAGS -fdefault-integer-8" make dcabs1.o +FFLAGS="$RPM_OPT_FLAGS -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS" make static +cp libblas.a ${RPM_BUILD_DIR}/%{name}-%{version}/libblas64.a +make clean +FFLAGS="$RPM_OPT_O_FLAGS -fPIC -fdefault-integer-8" make dcabs1.o +FFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS -fPIC" LDFLAGS="%{build_ldflags}" make shared +cp libblas.so.%{version} ${RPM_BUILD_DIR}/%{name}-%{version}/libblas64.so.%{version} +make clean +FFLAGS="$RPM_OPT_O_FLAGS -fdefault-integer-8" make dcabs1.o 
+SYMBOLSUFFIX="64_" FFLAGS="$RPM_OPT_FLAGS -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS" make static +cp libblas64_.a ${RPM_BUILD_DIR}/%{name}-%{version}/libblas64_.a +make clean +FFLAGS="$RPM_OPT_O_FLAGS -fPIC -fdefault-integer-8" make dcabs1.o +SYMBOLSUFFIX="64_" FFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS -fPIC" LDFLAGS="%{build_ldflags}" make shared +cp libblas64_.so.%{version} ${RPM_BUILD_DIR}/%{name}-%{version}/libblas64_.so.%{version} +%endif +popd + +ln -s libblas.so.%{version} libblas.so +%if 0%{?arch64} +ln -s libblas64.so.%{version} libblas64.so +ln -s libblas64_.so.%{version} libblas64_.so +%endif + +# Build CBLAS +cp CBLAS/include/cblas_mangling_with_flags.h.in CBLAS/include/cblas_mangling.h +pushd CBLAS/src +FFLAGS="$RPM_OPT_FLAGS" CFLAGS="$RPM_OPT_FLAGS -I../include" make static +cp libcblas.a ${RPM_BUILD_DIR}/%{name}-%{version}/ +make clean +FFLAGS="$RPM_OPT_FLAGS -fPIC" CFLAGS="$RPM_OPT_FLAGS -fPIC -I../include" LDFLAGS="%{build_ldflags}" make shared +cp libcblas.so.%{version} ${RPM_BUILD_DIR}/%{name}-%{version}/ +%if 0%{?arch64} +make clean +FFLAGS="$RPM_OPT_FLAGS -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS -I../include" make static +cp libcblas.a ${RPM_BUILD_DIR}/%{name}-%{version}/libcblas64.a +make clean +FFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS -fPIC -I../include" LDFLAGS="%{build_ldflags}" make shared +cp libcblas.so.%{version} ${RPM_BUILD_DIR}/%{name}-%{version}/libcblas64.so.%{version} +make clean +SYMBOLSUFFIX="64_" FFLAGS="$RPM_OPT_FLAGS -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS -I../include" make static +cp libcblas64_.a ${RPM_BUILD_DIR}/%{name}-%{version}/libcblas64_.a +make clean +SYMBOLSUFFIX="64_" FFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS -fPIC -I../include" LDFLAGS="%{build_ldflags}" make shared +cp libcblas64_.so.%{version} ${RPM_BUILD_DIR}/%{name}-%{version}/libcblas64_.so.%{version} +%endif +popd + +ln -s libcblas.so.%{version} 
libcblas.so +%if 0%{?arch64} +ln -s libcblas64.so.%{version} libcblas64.so +ln -s libcblas64_.so.%{version} libcblas64_.so +%endif + +# Build the static dlamch, dsecnd, lsame, second, slamch bits +pushd INSTALL +make FFLAGS_NOOPT="$RPM_OPT_O_FLAGS" FFLAGS="$RPM_OPT_FLAGS" +popd + +# Build the static lapack library +pushd SRC +make FFLAGS="$RPM_OPT_FLAGS" CFLAGS="$RPM_OPT_FLAGS" static +cp liblapack.a ${RPM_BUILD_DIR}/%{name}-%{version}/ +popd + +# Build the static with pic dlamch, dsecnd, lsame, second, slamch bits +pushd INSTALL +make clean +make FFLAGS_NOOPT="$RPM_OPT_O_FLAGS -fPIC" FFLAGS="$RPM_OPT_FLAGS -fPIC" +popd + +# Build the static with pic lapack library +pushd SRC +make clean +make FFLAGS="$RPM_OPT_FLAGS -fPIC" CFLAGS="$RPM_OPT_FLAGS -fPIC" static +cp liblapack.a ${RPM_BUILD_DIR}/%{name}-%{version}/liblapack_pic.a +popd + +%if 0%{?arch64} +# Build the static dlamch, dsecnd, lsame, second, slamch bits +pushd INSTALL +make clean +make FFLAGS_NOOPT="$RPM_OPT_O_FLAGS -fdefault-integer-8" FFLAGS="$RPM_OPT_FLAGS -fdefault-integer-8" +popd + +# Build the static lapack library +pushd SRC +make clean +make FFLAGS="$RPM_OPT_FLAGS -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS" static +cp liblapack.a ${RPM_BUILD_DIR}/%{name}-%{version}/liblapack64.a +popd + +# Build the static with pic dlamch, dsecnd, lsame, second, slamch bits (64bit INTEGER) +pushd INSTALL +make clean +make FFLAGS_NOOPT="$RPM_OPT_O_FLAGS -fPIC -fdefault-integer-8" FFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" +popd + +# Build the static with pic lapack library (64bit INTEGER) +pushd SRC +make clean +make FFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS -fPIC" static +cp liblapack.a ${RPM_BUILD_DIR}/%{name}-%{version}/liblapack_pic64.a +popd + +# Build the static dlamch, dsecnd, lsame, second, slamch bits +pushd INSTALL +make clean +make FFLAGS_NOOPT="$RPM_OPT_O_FLAGS -fdefault-integer-8" FFLAGS="$RPM_OPT_FLAGS -fdefault-integer-8" +popd + +# Build the static lapack 
library +pushd SRC +make clean +make SYMBOLSUFFIX="64_" FFLAGS="$RPM_OPT_FLAGS -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS" static +cp liblapack64_.a ${RPM_BUILD_DIR}/%{name}-%{version}/liblapack64_.a +popd + +# Build the static with pic dlamch, dsecnd, lsame, second, slamch bits (64bit INTEGER) +pushd INSTALL +make clean +make FFLAGS_NOOPT="$RPM_OPT_O_FLAGS -fPIC -fdefault-integer-8" FFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" +popd + +# Build the static with pic lapack library (64bit INTEGER) +pushd SRC +make clean +make SYMBOLSUFFIX="64_" FFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS -fPIC" static +cp liblapack64_.a ${RPM_BUILD_DIR}/%{name}-%{version}/liblapack_pic64_.a +popd +%endif + +# Build the shared dlamch, dsecnd, lsame, second, slamch bits +pushd INSTALL +make clean +make FFLAGS_NOOPT="$RPM_OPT_O_FLAGS -fPIC" FFLAGS="$RPM_OPT_FLAGS -fPIC" +popd + +# Build the shared lapack library +pushd SRC +make clean +make FFLAGS="$RPM_OPT_FLAGS -fPIC" CFLAGS="$RPM_OPT_FLAGS -fPIC" LDFLAGS="%{build_ldflags}" shared +cp liblapack.so.%{version} ${RPM_BUILD_DIR}/%{name}-%{version}/ +popd + +%if 0%{?arch64} +# Build the shared dlamch, dsecnd, lsame, second, slamch bits +pushd INSTALL +make clean +make FFLAGS_NOOPT="$RPM_OPT_O_FLAGS -fPIC -fdefault-integer-8" FFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" +popd + +# Build the shared lapack library +pushd SRC +make clean +make FFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" LDFLAGS="%{build_ldflags}" shared +cp liblapack.so.%{version} ${RPM_BUILD_DIR}/%{name}-%{version}/liblapack64.so.%{version} +popd + +# Build the shared dlamch, dsecnd, lsame, second, slamch bits +pushd INSTALL +make clean +make FFLAGS_NOOPT="$RPM_OPT_O_FLAGS -fPIC -fdefault-integer-8" FFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" +popd + +# Build the shared lapack library +pushd SRC +make clean +make SYMBOLSUFFIX="64_" FFLAGS="$RPM_OPT_FLAGS -fPIC 
-fdefault-integer-8" CFLAGS="$RPM_OPT_FLAGS -fPIC -fdefault-integer-8" LDFLAGS="%{build_ldflags}" shared +cp liblapack64_.so.%{version} ${RPM_BUILD_DIR}/%{name}-%{version}/liblapack64_.so.%{version} +popd +%endif + +ln -s liblapack.so.%{version} liblapack.so +%if 0%{?arch64} +ln -s liblapack64.so.%{version} liblapack64.so +ln -s liblapack64_.so.%{version} liblapack64_.so +%endif + +# Build the lapacke libraries +make FFLAGS="$RPM_OPT_FLAGS -fPIC" FFLAGS_NOOPT="$RPM_OPT_O_FLAGS -fPIC" tmglib +pushd LAPACKE +make clean +make CFLAGS="$RPM_OPT_FLAGS" BUILD_DEPRECATED="true" lapacke +make clean +make CFLAGS="$RPM_OPT_FLAGS -fPIC" BUILD_DEPRECATED="true" LDFLAGS="%{build_ldflags}" shlib +# cp liblapacke.so.%{version} ${RPM_BUILD_DIR}/%{name}-%{version}/ +popd + +cp -p %{SOURCE4} lapackqref.ps +cp -p %{SOURCE5} blasqr.ps + +%install +mkdir -p %{buildroot}%{_libdir} +mkdir -p %{buildroot}%{_mandir}/man3 +chmod 755 %{buildroot}%{_mandir}/man3 + +for f in liblapack.so.%{version} libblas.so.%{version} libcblas.so.%{version} liblapacke.so.%{version} \ + libblas.a libcblas.a liblapack.a liblapack_pic.a liblapacke.a; do + cp -f $f ${RPM_BUILD_ROOT}%{_libdir}/$f +done + +%if 0%{?arch64} +for f in liblapack64.so.%{version} libblas64.so.%{version} libcblas64.so.%{version} \ + liblapack64_.so.%{version} libblas64_.so.%{version} libcblas64_.so.%{version} \ + libblas64.a libcblas64.a liblapack64.a liblapack_pic64.a \ + libblas64_.a libcblas64_.a liblapack64_.a liblapack_pic64_.a; do + cp -f $f ${RPM_BUILD_ROOT}%{_libdir}/$f +done +%endif + +# Blas manpages +pushd manpages/ +mkdir -p blas/man/man3 +cd man/man3/ +mv caxpy.f.3 caxpy.3 ccopy.f.3 ccopy.3 cdotc.f.3 cdotc.3 cdotu.f.3 cdotu.3 cgbmv.f.3 cgbmv.3 \ +cgemm.f.3 cgemm.3 cgemv.f.3 cgemv.3 cgerc.f.3 cgerc.3 cgeru.f.3 cgeru.3 chbmv.f.3 chbmv.3 \ +chemm.f.3 chemm.3 chemv.f.3 chemv.3 cher.f.3 cher.3 cher2.f.3 cher2.3 cher2k.f.3 cher2k.3 \ +cherk.f.3 cherk.3 chpmv.f.3 chpmv.3 chpr.f.3 chpr.3 chpr2.f.3 chpr2.3 crotg.f.3 crotg.3 \ 
+cscal.f.3 cscal.3 csrot.f.3 csrot.3 csscal.f.3 csscal.3 cswap.f.3 cswap.3 csymm.f.3 \ +csymm.3 csyr2k.f.3 csyr2k.3 csyrk.f.3 csyrk.3 ctbmv.f.3 ctbmv.3 ctbsv.f.3 ctbsv.3 ctpmv.f.3 \ +ctpmv.3 ctpsv.f.3 ctpsv.3 ctrmm.f.3 ctrmm.3 ctrmv.f.3 ctrmv.3 ctrsm.f.3 ctrsm.3 ctrsv.f.3 \ +ctrsv.3 dasum.f.3 dasum.3 daxpy.f.3 daxpy.3 dcabs1.f.3 dcabs1.3 dcopy.f.3 dcopy.3 ddot.f.3 \ +ddot.3 dgbmv.f.3 dgbmv.3 dgemm.f.3 dgemm.3 dgemv.f.3 dgemv.3 dger.f.3 dger.3 dnrm2.f.3 \ +dnrm2.3 drot.f.3 drot.3 drotg.f.3 drotg.3 drotm.f.3 drotm.3 drotmg.f.3 drotmg.3 dsbmv.f.3 \ +dsbmv.3 dscal.f.3 dscal.3 dsdot.f.3 dsdot.3 dspmv.f.3 dspmv.3 dspr.f.3 dspr.3 dspr2.f.3 \ +dspr2.3 dswap.f.3 dswap.3 dsymm.f.3 dsymm.3 dsymv.f.3 dsymv.3 dsyr.f.3 dsyr.3 dsyr2.f.3 \ +dsyr2.3 dsyr2k.f.3 dsyr2k.3 dsyrk.f.3 dsyrk.3 dtbmv.f.3 dtbmv.3 dtbsv.f.3 dtbsv.3 dtpmv.f.3 \ +dtpmv.3 dtpsv.f.3 dtpsv.3 dtrmm.f.3 dtrmm.3 dtrmv.f.3 dtrmv.3 dtrsm.f.3 dtrsm.3 dtrsv.f.3 \ +dtrsv.3 dzasum.f.3 dzasum.3 dznrm2.f.3 dznrm2.3 icamax.f.3 icamax.3 idamax.f.3 idamax.3 \ +isamax.f.3 isamax.3 izamax.f.3 izamax.3 lsame.3 sasum.f.3 sasum.3 saxpy.f.3 saxpy.3 \ +scabs1.f.3 scabs1.3 scasum.f.3 scasum.3 scnrm2.f.3 scnrm2.3 scopy.f.3 scopy.3 sdot.f.3 sdot.3 \ +sdsdot.f.3 sdsdot.3 sgbmv.f.3 sgbmv.3 sgemm.f.3 sgemm.3 sgemv.f.3 sgemv.3 sger.f.3 sger.3 \ +snrm2.f.3 snrm2.3 srot.f.3 srot.3 srotg.f.3 srotg.3 srotm.f.3 srotm.3 srotmg.f.3 srotmg.3 \ +ssbmv.f.3 ssbmv.3 sscal.f.3 sscal.3 sspmv.f.3 sspmv.3 sspr.f.3 sspr.3 sspr2.f.3 sspr2.3 \ +sswap.f.3 sswap.3 ssymm.f.3 ssymm.3 ssymv.f.3 ssymv.3 ssyr.f.3 ssyr.3 ssyr2.f.3 ssyr2.3 \ +ssyr2k.f.3 ssyr2k.3 ssyrk.f.3 ssyrk.3 stbmv.f.3 stbmv.3 stbsv.f.3 stbsv.3 stpmv.f.3 stpmv.3 \ +stpsv.f.3 stpsv.3 strmm.f.3 strmm.3 strmv.f.3 strmv.3 strsm.f.3 strsm.3 strsv.f.3 strsv.3 \ +xerbla.3 xerbla_array.3 zaxpy.f.3 zaxpy.3 zcopy.f.3 zcopy.3 \ +zdotc.f.3 zdotc.3 zdotu.f.3 zdotu.3 zdrot.f.3 zdrot.3 zdscal.f.3 zdscal.3 zgbmv.f.3 zgbmv.3 \ +zgemm.f.3 zgemm.3 zgemv.f.3 zgemv.3 zgerc.f.3 zgerc.3 zgeru.f.3 zgeru.3 zhbmv.f.3 
zhbmv.3 \ +zhemm.f.3 zhemm.3 zhemv.f.3 zhemv.3 zher.f.3 zher.3 zher2.f.3 zher2.3 zher2k.f.3 zher2k.3 \ +zherk.f.3 zherk.3 zhpmv.f.3 zhpmv.3 zhpr.f.3 zhpr.3 zhpr2.f.3 zhpr2.3 zrotg.f.3 zrotg.3 \ +zscal.f.3 zscal.3 zswap.f.3 zswap.3 zsymm.f.3 zsymm.3 zsyr2k.f.3 zsyr2k.3 zsyrk.f.3 zsyrk.3 \ +ztbmv.f.3 ztbmv.3 ztbsv.f.3 ztbsv.3 ztpmv.f.3 ztpmv.3 ztpsv.f.3 ztpsv.3 ztrmm.f.3 ztrmm.3 \ +ztrmv.f.3 ztrmv.3 ztrsm.f.3 ztrsm.3 ztrsv.f.3 ztrsv.3 ../../blas/man/man3 +cd ../.. +popd + +find manpages/blas/man/man3 -type f -printf "%{_mandir}/man3/%f*\n" > blasmans + +# remove weird man pages +pushd manpages/man/man3 +rm -rf _Users_julie* +popd + +find manpages/man/man3 -type f -printf "%{_mandir}/man3/%f*\n" > lapackmans + +cp -f manpages/blas/man/man3/* ${RPM_BUILD_ROOT}%{_mandir}/man3 +cp -f manpages/man/man3/* ${RPM_BUILD_ROOT}%{_mandir}/man3 + +# Cblas headers +mkdir -p %{buildroot}%{_includedir}/cblas/ +cp -a CBLAS/include/*.h %{buildroot}%{_includedir}/cblas/ + +# Lapacke headers +mkdir -p %{buildroot}%{_includedir}/lapacke/ +cp -a LAPACKE/include/*.h %{buildroot}%{_includedir}/lapacke/ + +pushd %{buildroot}%{_libdir} +ln -sf liblapack.so.%{version} liblapack.so +ln -sf liblapack.so.%{version} liblapack.so.%{shortver} +ln -sf liblapack.so.%{version} liblapack.so.%{mediumver} +ln -sf libblas.so.%{version} libblas.so +ln -sf libblas.so.%{version} libblas.so.%{shortver} +ln -sf libblas.so.%{version} libblas.so.%{mediumver} +ln -sf libcblas.so.%{version} libcblas.so +ln -sf libcblas.so.%{version} libcblas.so.%{shortver} +ln -sf libcblas.so.%{version} libcblas.so.%{mediumver} +ln -sf liblapacke.so.%{version} liblapacke.so +ln -sf liblapacke.so.%{version} liblapacke.so.%{shortver} +ln -sf liblapacke.so.%{version} liblapacke.so.%{mediumver} +%if 0%{?arch64} +ln -sf liblapack64.so.%{version} liblapack64.so +ln -sf liblapack64.so.%{version} liblapack64.so.%{shortver} +ln -sf liblapack64.so.%{version} liblapack64.so.%{mediumver} +ln -sf libblas64.so.%{version} libblas64.so +ln -sf 
libblas64.so.%{version} libblas64.so.%{shortver} +ln -sf libblas64.so.%{version} libblas64.so.%{mediumver} +ln -sf libcblas64.so.%{version} libcblas64.so +ln -sf libcblas64.so.%{version} libcblas64.so.%{shortver} +ln -sf libcblas64.so.%{version} libcblas64.so.%{mediumver} +ln -sf liblapack64_.so.%{version} liblapack64_.so +ln -sf liblapack64_.so.%{version} liblapack64_.so.%{shortver} +ln -sf liblapack64_.so.%{version} liblapack64_.so.%{mediumver} +ln -sf libblas64_.so.%{version} libblas64_.so +ln -sf libblas64_.so.%{version} libblas64_.so.%{shortver} +ln -sf libblas64_.so.%{version} libblas64_.so.%{mediumver} +ln -sf libcblas64_.so.%{version} libcblas64_.so +ln -sf libcblas64_.so.%{version} libcblas64_.so.%{shortver} +ln -sf libcblas64_.so.%{version} libcblas64_.so.%{mediumver} +%endif +popd + +# pkgconfig +mkdir -p %{buildroot}%{_libdir}/pkgconfig/ +cp -a lapack.pc.in %{buildroot}%{_libdir}/pkgconfig/lapack.pc +sed -i 's|@CMAKE_INSTALL_FULL_LIBDIR@|%{_libdir}|g' %{buildroot}%{_libdir}/pkgconfig/lapack.pc +sed -i 's|@CMAKE_INSTALL_FULL_INCLUDEDIR@|%{_includedir}|g' %{buildroot}%{_libdir}/pkgconfig/lapack.pc +sed -i 's|@LAPACK_VERSION@|%{version}|g' %{buildroot}%{_libdir}/pkgconfig/lapack.pc +%if 0%{?arch64} +cp -a %{buildroot}%{_libdir}/pkgconfig/lapack.pc %{buildroot}%{_libdir}/pkgconfig/lapack64.pc +sed -i 's|-llapack|-llapack64|g' %{buildroot}%{_libdir}/pkgconfig/lapack64.pc +sed -i 's|blas|blas64|g' %{buildroot}%{_libdir}/pkgconfig/lapack64.pc +cp -a %{buildroot}%{_libdir}/pkgconfig/lapack.pc %{buildroot}%{_libdir}/pkgconfig/lapack64_.pc +sed -i 's|-llapack|-llapack64_|g' %{buildroot}%{_libdir}/pkgconfig/lapack64_.pc +sed -i 's|blas|blas64_|g' %{buildroot}%{_libdir}/pkgconfig/lapack64_.pc +%endif +cp -a BLAS/blas.pc.in %{buildroot}%{_libdir}/pkgconfig/blas.pc +sed -i 's|@CMAKE_INSTALL_FULL_LIBDIR@|%{_libdir}|g' %{buildroot}%{_libdir}/pkgconfig/blas.pc +sed -i 's|@CMAKE_INSTALL_FULL_INCLUDEDIR@|%{_includedir}|g' %{buildroot}%{_libdir}/pkgconfig/blas.pc +sed -i 
's|@LAPACK_VERSION@|%{version}|g' %{buildroot}%{_libdir}/pkgconfig/blas.pc +%if 0%{?arch64} +cp -a %{buildroot}%{_libdir}/pkgconfig/blas.pc %{buildroot}%{_libdir}/pkgconfig/blas64.pc +sed -i 's|-lblas|-lblas64|g' %{buildroot}%{_libdir}/pkgconfig/blas64.pc +cp -a %{buildroot}%{_libdir}/pkgconfig/blas.pc %{buildroot}%{_libdir}/pkgconfig/blas64_.pc +sed -i 's|-lblas|-lblas64_|g' %{buildroot}%{_libdir}/pkgconfig/blas64_.pc +%endif +cp -a LAPACKE/lapacke.pc.in %{buildroot}%{_libdir}/pkgconfig/lapacke.pc +sed -i 's|@CMAKE_INSTALL_FULL_LIBDIR@|%{_libdir}|g' %{buildroot}%{_libdir}/pkgconfig/lapacke.pc +sed -i 's|@CMAKE_INSTALL_FULL_INCLUDEDIR@|%{_includedir}/lapacke|g' %{buildroot}%{_libdir}/pkgconfig/lapacke.pc +sed -i 's|@LAPACK_VERSION@|%{version}|g' %{buildroot}%{_libdir}/pkgconfig/lapacke.pc +cp -a CBLAS/cblas.pc.in %{buildroot}%{_libdir}/pkgconfig/cblas.pc +sed -i 's|@CMAKE_INSTALL_FULL_LIBDIR@|%{_libdir}|g' %{buildroot}%{_libdir}/pkgconfig/cblas.pc +sed -i 's|@CMAKE_INSTALL_FULL_INCLUDEDIR@|%{_includedir}/cblas|g' %{buildroot}%{_libdir}/pkgconfig/cblas.pc +sed -i 's|@LAPACK_VERSION@|%{version}|g' %{buildroot}%{_libdir}/pkgconfig/cblas.pc +%if 0%{?arch64} +cp -a %{buildroot}%{_libdir}/pkgconfig/cblas.pc %{buildroot}%{_libdir}/pkgconfig/cblas64.pc +sed -i 's|-lcblas|-lcblas64|g' %{buildroot}%{_libdir}/pkgconfig/cblas64.pc +sed -i 's|Requires.private: blas|Requires.private: blas64|g' %{buildroot}%{_libdir}/pkgconfig/cblas64.pc +cp -a %{buildroot}%{_libdir}/pkgconfig/cblas.pc %{buildroot}%{_libdir}/pkgconfig/cblas64_.pc +sed -i 's|-lcblas|-lcblas64_|g' %{buildroot}%{_libdir}/pkgconfig/cblas64_.pc +sed -i 's|Requires.private: blas|Requires.private: blas64_|g' %{buildroot}%{_libdir}/pkgconfig/cblas64_.pc +%endif + +%ldconfig_scriptlets + +%ldconfig_scriptlets -n blas + +%if 0%{?arch64} +%ldconfig_scriptlets -n lapack64 +%ldconfig_scriptlets -n lapack64_ + +%ldconfig_scriptlets -n blas64 +%ldconfig_scriptlets -n blas64_ +%endif + +%files -f lapackmans +%doc README.md 
LICENSE lapackqref.ps +%{_libdir}/liblapack.so.* +%{_libdir}/liblapacke.so.* + +%files devel +%{_includedir}/lapacke/ +%{_libdir}/liblapack.so +%{_libdir}/liblapacke.so +%{_libdir}/pkgconfig/lapack.pc +%{_libdir}/pkgconfig/lapacke.pc +%if 0%{?arch64} +%{_libdir}/liblapack64.so +%{_libdir}/pkgconfig/lapack64.pc +%{_libdir}/liblapack64_.so +%{_libdir}/pkgconfig/lapack64_.pc +%endif + +%files static +%{_libdir}/liblapack.a +%{_libdir}/liblapack_pic.a +%{_libdir}/liblapacke.a +%if 0%{?arch64} +%{_libdir}/liblapack64.a +%{_libdir}/liblapack_pic64.a +%{_libdir}/liblapack64_.a +%{_libdir}/liblapack_pic64_.a +%endif + +%files -n blas -f blasmans +%doc blasqr.ps LICENSE +%{_libdir}/libblas.so.* +%{_libdir}/libcblas.so.* + +%files -n blas-devel +%{_includedir}/cblas/ +%{_libdir}/libblas.so +%{_libdir}/libcblas.so +%{_libdir}/pkgconfig/blas.pc +%{_libdir}/pkgconfig/cblas.pc +%if 0%{?arch64} +%{_libdir}/libblas64.so +%{_libdir}/libcblas64.so +%{_libdir}/pkgconfig/blas64.pc +%{_libdir}/pkgconfig/cblas64.pc +%{_libdir}/libblas64_.so +%{_libdir}/libcblas64_.so +%{_libdir}/pkgconfig/blas64_.pc +%{_libdir}/pkgconfig/cblas64_.pc +%endif + +%files -n blas-static +%{_libdir}/libblas.a +%{_libdir}/libcblas.a +%if 0%{?arch64} +%{_libdir}/libblas64.a +%{_libdir}/libcblas64.a +%{_libdir}/libblas64_.a +%{_libdir}/libcblas64_.a +%endif + +%if 0%{?arch64} +%files -n blas64 +%doc LICENSE +%{_libdir}/libblas64.so.* +%{_libdir}/libcblas64.so.* + +%files -n lapack64 +%doc README.md LICENSE +%{_libdir}/liblapack64.so.* + +%files -n blas64_ +%doc LICENSE +%{_libdir}/libblas64_.so.* +%{_libdir}/libcblas64_.so.* + +%files -n lapack64_ +%doc README.md LICENSE +%{_libdir}/liblapack64_.so.* +%endif + +%changelog +* Tue Jul 28 2020 Fedora Release Engineering - 3.9.0-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild + +* Sun Jun 21 2020 Iñaki Úcar - 3.9.0-4 +- make separate packages for 64-bit versions with and without suffix (bz1295965) + +* Thu Mar 19 2020 Tom Callaway - 3.9.0-3 +- 
apply upstream fix for accidental removal of deprecated symbols from header file + +* Wed Jan 29 2020 Fedora Release Engineering - 3.9.0-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild + +* Tue Nov 26 2019 Tom Callaway - 3.9.0-1 +- update to 3.9.0 + +* Thu Jul 25 2019 Fedora Release Engineering - 3.8.0-13 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild + +* Wed May 29 2019 Tom Callaway - 3.8.0-12 +- use --no-optimize-sibling-calls to work around gfortran issues + +* Fri Feb 01 2019 Fedora Release Engineering - 3.8.0-11 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild + +* Fri Jul 13 2018 Fedora Release Engineering - 3.8.0-10 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild + +* Tue Jun 19 2018 Tom Callaway - 3.8.0-9 +- explicitly link liblapacke.so with liblapack to remove undefined-non-weak-symbols + +* Mon Mar 5 2018 Tom Callaway - 3.8.0-8 +- use LDFLAGS for shared libs + +* Mon Feb 26 2018 Tom Callaway - 3.8.0-7 +- add missing aawork functions back to lapacke makefile (bz1549262) + +* Wed Feb 07 2018 Fedora Release Engineering - 3.8.0-6 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild + +* Tue Jan 30 2018 Björn Esser - 3.8.0-5 +- Rebuilt for GCC8 + +* Wed Jan 3 2018 Tom Callaway - 3.8.0-4 +- fix cblas + +* Mon Dec 4 2017 Tom Callaway - 3.8.0-3 +- build cblas +- include pkgconfig files. 
+ +* Fri Nov 17 2017 Tom Callaway - 3.8.0-2 +- add ilaenv2stage + +* Wed Nov 15 2017 Tom Callaway - 3.8.0-1 +- update to 3.8.0 + +* Mon Aug 14 2017 Tom Callaway - 3.7.1-5 +- rename 64_ libraries to lib*64_* + +* Fri Aug 11 2017 Tom Callaway - 3.7.1-4 +- move to 64_ suffix and symbol mangling (bz1295965) + +* Thu Aug 10 2017 Tom Callaway - 3.7.1-3 +- include DSLASRC and ZCLASRC + +* Wed Aug 9 2017 Tom Callaway - 3.7.1-2 +- fixup Makefile.lapack to include new stuff + +* Tue Aug 1 2017 Tom Callaway - 3.7.1-1 +- update to 3.7.1 + +* Wed Jul 26 2017 Fedora Release Engineering - 3.6.1-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild + +* Fri Feb 10 2017 Fedora Release Engineering - 3.6.1-4 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild + +* Sat Jan 28 2017 Björn Esser - 3.6.1-3 +- Rebuilt for GCC-7 + +* Mon Oct 10 2016 Tom Callaway - 3.6.1-2 +- properly set NOOPT flags during lapacke compile (thanks to sorear2@gmail.com) + +* Wed Jul 6 2016 Tom Callaway - 3.6.1-1 +- update to 3.6.1 + +* Thu Feb 04 2016 Fedora Release Engineering - 3.6.0-7 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild + +* Wed Jan 13 2016 Tom Callaway - 3.6.0-6 +- fix lapack Makefile to rebuild every file on every pass (thanks to adm.fkt.physik tu-dortmund.de) + +* Thu Dec 3 2015 Tom Callaway - 3.6.0-5 +- fix lapache static lib to include TMGLIB bits + +* Wed Dec 2 2015 Tom Callaway - 3.6.0-4 +- build deprecated functions for lapacke (RHBZ #1287405) + +* Sat Nov 28 2015 Igor Gnatenko - 3.6.0-3 +- build deprecated functions also (RHBZ #1286349) + +* Thu Nov 19 2015 Tom Callaway - 3.6.0-2 +- add missing functions, resolves bz1282958 + +* Tue Nov 17 2015 Tom Callaway - 3.6.0-1 +- update to 3.6.0 + +* Fri Sep 11 2015 Tom Callaway - 3.5.0-12 +- fix missing dependencies between subpackages +- delete broken and wrongly installed manpages +- fix isa_bits conditional + +* Wed Jun 17 2015 Fedora Release Engineering - 3.5.0-11 +- Rebuilt for 
https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild + +* Thu Dec 18 2014 Susi Lehtola 3.5.0-10 +- Add the -frecursive flag so that the functions are thread safe. + +* Mon Oct 13 2014 Peter Robinson 3.5.0-9 +- Use generic macro to detect 64 bit platforms + +* Sun Aug 17 2014 Fedora Release Engineering - 3.5.0-8 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild + +* Mon Jul 7 2014 Tom Callaway - 3.5.0-7 +- apply BLAS fixes from R + +* Thu Jun 19 2014 Tom Callaway - 3.5.0-6 +- compile in tmglib object files, not static lib + +* Wed Jun 18 2014 Tom Callaway - 3.5.0-5 +- link tmglib into lapacke + +* Tue Jun 17 2014 Tom Callaway - 3.5.0-4 +- include matgen_obj items in lapacke library + +* Sat Jun 07 2014 Fedora Release Engineering - 3.5.0-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild + +* Thu May 29 2014 Michael Schwendt - 3.5.0-2 +- Don't include manual page directories (#1089412). +- Use standard group System Environment/Libraries in runtime library packages. 
+ +* Mon Nov 18 2013 Tom Callaway - 3.5.0-1 +- update to 3.5.0 + +* Sat Aug 03 2013 Fedora Release Engineering - 3.4.2-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild + +* Mon Mar 25 2013 Tom Callaway - 3.4.2-2 +- clean out non-free example files from source tarball + +* Thu Feb 21 2013 Tom Callaway - 3.4.2-1 +- update to 3.4.2 + +* Thu Feb 14 2013 Fedora Release Engineering - 3.4.1-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild + +* Mon Jan 7 2013 Tom Callaway - 3.4.1-4 +- fix 64bit sonames + +* Fri Jan 4 2013 Tom Callaway - 3.4.1-3 +- enable 64bit INTEGER variant subpackages + +* Wed Oct 24 2012 Tom Callaway - 3.4.1-2 +- fix issue where lapacke was linking to testing functions (bz860332) + +* Thu Sep 06 2012 Orion Poplawski - 3.4.1-1 +- Update to 3.4.1 +- Rebase lapacke shared lib patch + +* Thu Jul 19 2012 Fedora Release Engineering - 3.4.0-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild + +* Fri Jan 13 2012 Fedora Release Engineering - 3.4.0-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild + +* Mon Nov 28 2011 Tom Callaway - 3.4.0-1 +- update to 3.4.0 +- build and include lapacke + +* Thu Jun 02 2011 Tom Callaway - 3.3.1-1 +- update to 3.3.1 +- create /usr/share/man/manl/ as 0755 and own it in lapack and blas (bz634369) +- spec file cleanup + +* Mon Feb 07 2011 Fedora Release Engineering - 3.2.2-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild + +* Sat Jul 17 2010 Dan Horák - 3.2.2-2 +- fix a typo in Makefile.lapack causing #615618 + +* Wed Jul 7 2010 Tom "spot" Callaway - 3.2.2-1 +- update to 3.2.2 +- properly include license text +- static subpackages depend on -devel (they're not useful without it) +- clean up makefiles +- pass on version into makefiles, rather than manually hacking on each update + +* Wed Dec 9 2009 Tom "spot" Callaway - 3.2.1-4 +- Move static libs to static subpackages (resolves bz 545143) + +* Fri Sep 4 2009 Tom "spot" 
Callaway - 3.2.1-3 +- use RPM_OPT_O_FLAGS (-O0) everywhere necessary, drop RPM_OPT_SIZE_FLAGS (-Os) (bz 520518) + +* Thu Aug 20 2009 Tom "spot" Callaway - 3.2.1-2 +- don't enable xblas yet + +* Fri Aug 14 2009 Tom "spot" Callaway - 3.2.1-1 +- update to 3.2.1, spec file cleanups + +* Mon Aug 10 2009 Ville Skyttä - 3.1.1-7 +- Convert specfile to UTF-8. + +* Fri Jul 24 2009 Fedora Release Engineering - 3.1.1-6 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild + +* Wed Feb 25 2009 Fedora Release Engineering - 3.1.1-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild + +* Tue Jul 8 2008 Tom "spot" Callaway 3.1.1-4 +- fix missing dependencies (bz 442915) + +* Tue Feb 19 2008 Fedora Release Engineering - 3.1.1-3 +- Autorebuild for GCC 4.3 + +* Thu Aug 23 2007 Tom "spot" Callaway 3.1.1-2 +- fix license (BSD) +- rebuild for BuildID + +* Fri May 25 2007 Tom "spot" Callaway 3.1.1-1 +- bump to 3.1.1 + +* Fri Jan 5 2007 Tom "spot" Callaway 3.1.0-4 +- fix bugzillas 219740,219741 + +* Wed Dec 20 2006 Tom "spot" Callaway 3.1.0-3 +- make clean everywhere + +* Wed Dec 20 2006 Tom "spot" Callaway 3.1.0-2 +- fix the Makefiles + +* Tue Nov 14 2006 Tom "spot" Callaway 3.1.0-1 +- bump to 3.1.0 + +* Thu Sep 14 2006 Tom "spot" Callaway 3.0-38 +- bump for fc-6 + +* Tue Feb 28 2006 Tom "spot" Callaway 3.0-37 +- bump for FC5 + +* Mon Dec 19 2005 Tom "spot" Callaway 3.0-36 +- bump for gcc4.1 + +* Tue Nov 15 2005 Tom "spot" Callaway 3.0-35 +- try not to patch files that do not exist + +* Tue Nov 15 2005 Tom "spot" Callaway 3.0-34 +- finish fixing bz 143340 + +* Thu Oct 6 2005 Tom "spot" Callaway 3.0-33 +- fix bz 169558 + +* Wed Sep 28 2005 Tom "spot" Callaway 3.0-32 +- move to latest upstream 3.0 tarballs +- add 8 missing BLAS functions from upstream blas tarball (bz 143340) + +* Thu Sep 22 2005 Tom "spot" Callaway 3.0-31 +- actually install liblapack_pic.a + +* Wed Sep 14 2005 Tom "spot" Callaway 3.0-30 +- make -devel packages +- make liblapack_pic.a 
package +- use dist tag + +* Thu Apr 14 2005 Tom "spot" Callaway 3.0-29 +- package moves to Fedora Extras, gcc4 + +* Tue Dec 21 2004 Ivana Varekova +- fix bug #143420 problem with compiler optimalizations + +* Tue Nov 30 2004 Ivana Varekova +- fix bug #138683 problem with compilation + +* Thu Nov 11 2004 Ivana Varekova +- fix build problem bug #138447 + +* Tue Jun 15 2004 Elliot Lee +- rebuilt + +* Tue Mar 02 2004 Elliot Lee +- rebuilt + +* Fri Feb 13 2004 Elliot Lee +- rebuilt + +* Wed Dec 31 2003 Jeff Johnson 3.0-23 +- link -lg2c explicitly into liblapack and libblas (#109079). + +* Wed Aug 20 2003 Jeremy Katz 3.0-22 +- nuke -man subpackages (#97506) + +* Wed Jun 04 2003 Elliot Lee +- rebuilt + +* Wed Jan 22 2003 Tim Powers +- rebuilt + +* Sun Nov 10 2002 Jeff Johnson 3.0-19 +- rebuild with x86_64. + +* Thu Jul 18 2002 Trond Eivind Glomsrod 3.0-18 +- Remove an empty man page (#63569) + +* Fri Jun 21 2002 Tim Powers +- automated rebuild + +* Thu May 23 2002 Tim Powers +- automated rebuild + +* Wed May 1 2002 Trond Eivind Glomsrod 3.0-15 +- Rebuild + +* Thu Feb 21 2002 Trond Eivind Glomsrod 3.0-14 +- Rebuild + +* Wed Jan 09 2002 Tim Powers +- automated rebuild + +* Mon Aug 13 2001 Trond Eivind Glomsrod 3.0-12 +- The man-pages for xerbla and lsame were in blas-man and lapack-man (#51605) + +* Fri Jun 8 2001 Trond Eivind Glomsrod +- Reenable optimization for IA64 + +* Fri May 25 2001 Trond Eivind Glomsrod +- Add all patches from the LAPACK site as of 2001-05-25 +- Use this workaround for IA64 instead +- Remove SPARC workaround +- Don't exclude IA64 + +* Thu Dec 07 2000 Trond Eivind Glomsrod +- rebuild for main distribution + +* Mon Nov 20 2000 Trond Eivind Glomsrod +- add the LAPACK Quick Reference Guide to the docs +- add the BLAS Quick Reference Guide to the docs + +* Tue Aug 01 2000 Trond Eivind Glomsrod +- fix lack of ldconfig in postuninstall script + +* Mon Jul 24 2000 Prospector +- rebuilt + +* Mon Jul 10 2000 Trond Eivind Glomsrod +- updated with the latest 
updates (new tarfile..) from netlib + +* Thu Jun 15 2000 Trond Eivind Glomsrod +- use %%{_mandir} +- added some flags to work around SPARC compiler bug + +* Wed Jan 19 2000 Tim Powers +- bzipped sources to conserve space + +* Tue Jan 4 2000 Jeff Johnson +- build for PowerTools 6.2. + +* Sat Dec 25 1999 Joachim Frieben +- updated to version v3.0 + update as of Tue Nov 30 1999 + +* Sat Oct 23 1999 Joachim Frieben +- updated Red Hat makefiles to v3.0 + +* Mon Aug 2 1999 Tim Powers +- updated to v3.0 +- built for 6.1 + +* Mon Apr 12 1999 Michael Maher +- built package for 6.0 + +* Sat Oct 24 1998 Jeff Johnson +- new description/summary text. + +* Fri Jul 17 1998 Jeff Johnson +- repackage for powertools. + +* Sun Feb 15 1998 Trond Eivind Glomsrod + [lapack-2.0-9] + - No code updates, just built with a customized rpm - + this should make dependencies right. + +* Sat Feb 07 1998 Trond Eivind Glomsrod + [lapack-2.0-8] + - Total rewrite of the spec file + - Added my own makefiles - libs should build better, + static libs should work (and be faster than they + would be if they had worked earlier ;) + - No patch necessary anymore. + - Renamed lapack-blas and lapack-blas-man to + blas and blas-man. "Obsoletes:" tag added. 
+ (oh - and as always: Dedicated to the girl I + love, Eline Skirnisdottir) + +* Sat Dec 06 1997 Trond Eivind Glomsrod + [lapack-2.0-7] + - added a dependency to glibc, so people don't try with libc5 + +* Thu Nov 20 1997 Trond Eivind Glomsrod + [lapack-2.0-6] + - removed etime.c + - compiled with egcs, and for glibc 2.0 + +* Sun Oct 12 1997 Trond Eivind Glomsrod + [lapack-2.0-5] + - added a changelog + - cleaned up building of shared libs + - now uses a BuildRoot + - cleaned up the specfile diff --git a/lapackqref.ps b/lapackqref.ps new file mode 100644 index 0000000..f1d457b --- /dev/null +++ b/lapackqref.ps @@ -0,0 +1,1078 @@ +%!PS-Adobe-2.0 +%%Creator: dvipsk 5.58f Copyright 1986, 1994 Radical Eye Software +%%Title: quickref.dvi +%%Pages: 4 +%%PageOrder: Ascend +%%Orientation: Landscape +%%BoundingBox: 0 0 612 792 +%%DocumentPaperSizes: Letter +%%EndComments +%DVIPSCommandLine: dvips -o quickref.ps quickref.dvi +%DVIPSParameters: dpi=300, comments removed +%DVIPSSource: TeX output 1999.10.04:1103 +%%BeginProcSet: tex.pro +/TeXDict 250 dict def TeXDict begin /N{def}def /B{bind def}N /S{exch}N +/X{S N}B /TR{translate}N /isls false N /vsize 11 72 mul N /hsize 8.5 72 +mul N /landplus90{false}def /@rigin{isls{[0 landplus90{1 -1}{-1 1} +ifelse 0 0 0]concat}if 72 Resolution div 72 VResolution div neg scale +isls{landplus90{VResolution 72 div vsize mul 0 exch}{Resolution -72 div +hsize mul 0}ifelse TR}if Resolution VResolution vsize -72 div 1 add mul +TR[matrix currentmatrix{dup dup round sub abs 0.00001 lt{round}if} +forall round exch round exch]setmatrix}N /@landscape{/isls true N}B +/@manualfeed{statusdict /manualfeed true put}B /@copies{/#copies X}B +/FMat[1 0 0 -1 0 0]N /FBB[0 0 0 0]N /nn 0 N /IE 0 N /ctr 0 N /df-tail{ +/nn 8 dict N nn begin /FontType 3 N /FontMatrix fntrx N /FontBBox FBB N +string /base X array /BitMaps X /BuildChar{CharBuilder}N /Encoding IE N +end dup{/foo setfont}2 array copy cvx N load 0 nn put /ctr 0 N[}B /df{ +/sf 1 N /fntrx FMat N df-tail}B 
/dfs{div /sf X /fntrx[sf 0 0 sf neg 0 0] +N df-tail}B /E{pop nn dup definefont setfont}B /ch-width{ch-data dup +length 5 sub get}B /ch-height{ch-data dup length 4 sub get}B /ch-xoff{ +128 ch-data dup length 3 sub get sub}B /ch-yoff{ch-data dup length 2 sub +get 127 sub}B /ch-dx{ch-data dup length 1 sub get}B /ch-image{ch-data +dup type /stringtype ne{ctr get /ctr ctr 1 add N}if}B /id 0 N /rw 0 N +/rc 0 N /gp 0 N /cp 0 N /G 0 N /sf 0 N /CharBuilder{save 3 1 roll S dup +/base get 2 index get S /BitMaps get S get /ch-data X pop /ctr 0 N ch-dx +0 ch-xoff ch-yoff ch-height sub ch-xoff ch-width add ch-yoff +setcachedevice ch-width ch-height true[1 0 0 -1 -.1 ch-xoff sub ch-yoff +.1 sub]{ch-image}imagemask restore}B /D{/cc X dup type /stringtype ne{]} +if nn /base get cc ctr put nn /BitMaps get S ctr S sf 1 ne{dup dup +length 1 sub dup 2 index S get sf div put}if put /ctr ctr 1 add N}B /I{ +cc 1 add D}B /bop{userdict /bop-hook known{bop-hook}if /SI save N @rigin +0 0 moveto /V matrix currentmatrix dup 1 get dup mul exch 0 get dup mul +add .99 lt{/QV}{/RV}ifelse load def pop pop}N /eop{SI restore userdict +/eop-hook known{eop-hook}if showpage}N /@start{userdict /start-hook +known{start-hook}if pop /VResolution X /Resolution X 1000 div /DVImag X +/IE 256 array N 0 1 255{IE S 1 string dup 0 3 index put cvn put}for +65781.76 div /vsize X 65781.76 div /hsize X}N /p{show}N /RMat[1 0 0 -1 0 +0]N /BDot 260 string N /rulex 0 N /ruley 0 N /v{/ruley X /rulex X V}B /V +{}B /RV statusdict begin /product where{pop product dup length 7 ge{0 7 +getinterval dup(Display)eq exch 0 4 getinterval(NeXT)eq or}{pop false} +ifelse}{false}ifelse end{{gsave TR -.1 .1 TR 1 1 scale rulex ruley false +RMat{BDot}imagemask grestore}}{{gsave TR -.1 .1 TR rulex ruley scale 1 1 +false RMat{BDot}imagemask grestore}}ifelse B /QV{gsave newpath transform +round exch round exch itransform moveto rulex 0 rlineto 0 ruley neg +rlineto rulex neg 0 rlineto fill grestore}B /a{moveto}B /delta 0 N /tail +{dup /delta X 
0 rmoveto}B /M{S p delta add tail}B /b{S p tail}B /c{-4 M} +B /d{-3 M}B /e{-2 M}B /f{-1 M}B /g{0 M}B /h{1 M}B /i{2 M}B /j{3 M}B /k{ +4 M}B /w{0 rmoveto}B /l{p -4 w}B /m{p -3 w}B /n{p -2 w}B /o{p -1 w}B /q{ +p 1 w}B /r{p 2 w}B /s{p 3 w}B /t{p 4 w}B /x{0 S rmoveto}B /y{3 2 roll p +a}B /bos{/SS save N}B /eos{SS restore}B end +%%EndProcSet +TeXDict begin 52099146 40258431 1000 300 300 (quickref.dvi) +@start /Fa 20 91 df<070007000700E738FFF87FF01FC01FC07FF0FFF8E73807000700 +07000D0E7E9012>42 D45 D<060006000E001E00FE00EE00 +0E000E000E000E000E000E000E000E000E000E000E000E00FFE0FFE00B147D9312>49 +D<01F007F80E1C181C381C70007000E7C0EFF0F838F01CE00EE00EE00E700E700E301C38 +381FF007C00F147F9312>54 D<038007C007C006C006C00EE00EE00EE00EE00C601C701C +701C701FF01FF0383838383838FC7EFC7E0F147F9312>65 DI< +03E60FFE1C3E381E700E700E600EE000E000E000E000E000E000600E700E700E381C1C38 +0FF003E00F147F9312>III73 D76 DII<3FE07FF07070E038E038E038E038E038E038E038E038E038E038E038E0 +38E038E03870707FF03FE00D147E9312>II82 D<1F303FF070F0E070E070E070E00070007F003FC00FE000F0007800386038E0 +38E030F070FFE0CF800D147E9312>I85 D<7C7C7C7C3C701CF01EE00FE00FC007C007800380078007C0 +0FC00EE01EE01C701C703838FC7EFC7E0F147F9312>88 D<7FF8FFF8E070E070E0E000E0 +01C003800380070007000E000E001C00381C381C701C701CFFFCFFFC0E147E9312>90 +D E /Fb 27 122 df<60C0F1E0F1E070E01020102020402040408040800B0A7F9612>34 +D<60F0F06000000000000060F0F060040E7D8D0A>58 D<00FC100383300E00B01C007038 +0030300030700010600010E00010E00000E00000E00000E00000E00000E0001060001070 +00103000203800201C00400E008003830000FC0014177E9619>67 +D78 D82 D<0FC4302C601C400CC004C004C004E00070007F003FE00FF801 +FC001C000E0006800680068006C004E008D81087E00F177E9614>I<2040204040804080 +81008100E1C0F1E0F1E060C00B0A7B9612>92 D<3FC0706070302038003803F81E383038 +7038E039E039E07970FF1F1E100E7F8D12>97 DI<07F01838303870106000E000 +E000E000E000600070083008183007C00D0E7F8D10>I<003E00000E00000E00000E0000 +0E00000E00000E00000E00000E0007CE001C3E00300E00700E00600E00E00E00E00E00E0 
+0E00E00E00600E00700E00301E00182E0007CF8011177F9614>I<0FC0186030307038E0 +18FFF8E000E000E000600070083010183007C00D0E7F8D10>I<0F9E18E3306070707070 +7070306018C02F80200060003FE03FF83FFC600EC006C006C006600C38380FE010157F8D +12>103 DI<307878300000000000F8383838383838383838383838FE07177F96 +0A>I108 +DII<07C018303018600C600CE00EE00EE00EE00EE00E701C3018183007C00F0E7F +8D12>II114 +D<1F4060C0C040C040E000FF007F801FC001E080608060C060E0C09F000B0E7F8D0E>I< +080008000800180018003800FFC038003800380038003800380038003840384038403840 +1C800F000A147F930E>IIII< +FE1F380E3C0C1C081C080E100E100720072007E003C003C00180018001000100E200E200 +A400780010147F8D13>121 D E /Fc 30 91 df<0C1C30306060C0C0C0C0C0C060603030 +1C0C06127C900D>40 D<80C06060303018181818181830306060C08005127D900D>I<40 +E06060C08003067B820D>44 D<1E007F00E180C0C0C0C000C000C00180030006000C0018 +0030C07FC07FC00A0F7F8E0D>50 D<0C001E001E0012001200120033003300330033003F +007F806180F3C0F3C00A0F7F8E0D>65 DI<0EC01FC031C060C0C0C0C000C000C000C000 +C000C0C060C031C01F800E000A0F7F8E0D>IIII<1F803F8063806180C180C000C000 +C3C0C3C0C180C180618063803F801D800A0F7F8E0D>III<1F1F06060606060606060606C67C38080F7E8E0D>IIIII<3F +007F80E1C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0E1C07F803F000A0F7F8E0D>I< +FF00FF8061C060C060C060C061C07F807F006000600060006000F000F0000A0F7F8E0D> +I<3F007F80E1C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0CCC0E7C07F803F000380018001 +C00A127F8E0D>II<1F807F80E180C180C000E0007C001F00038000C000C040C0C180FF +80FE000A0F7F8E0D>I<7FC0FFC0CCC0CCC00C000C000C000C000C000C000C000C000C00 +3F003F000A0F7F8E0D>IIII<73807380330036001E001E000C000C001E001E003700 +33007300F3C0F3C00A0F7F8E0D>II<7FC0FFC0C180C1800300060006000C0018001800 +30C060C060C0FFC0FFC00A0F7F8E0D>I E /Fd 51 123 df<03F00E1818181800180018 +00FFF81818181818181818181818181818181818187E7E0F11809012>12 +D<0780000C80001840001840001880001880001907E00E01800C01001602002704004304 +00C18800C0D000C0604060F8C03F0F8013117F9017>38 D<0408103020606040C0C0C0C0 +C0C0C0C0C0C04060602030100804061A7D920C>40 
D<80402030101818080C0C0C0C0C0C +0C0C0C0C0818181030204080061A7F920C>I<40E060202040408003087E8209>44 +DI<0020006000C000C000C001800180030003000300060006000C00 +0C000C0018001800180030003000600060006000C000C0000B197F920F>47 +D<00C00000C00001E000016000016000023000023000023000041800041800080C000FFC +00080C00100600100600300700FC1FC012117F9016>65 DI<03F10C0B180730 +0360014001C000C000C000C000C00040016001300218020C0C03F010117E9016>IIII<03F1000C0B00 +180700300300600100400100C00000C00000C00000C01FC0C00300400300600300300300 +1803000C070003F90012117E9017>III76 +DI<03E0000C180010040020020060 +0300400100C00180C00180C00180C00180C001806003006003003006003006000C180003 +E00011117E9017>79 DI<03E0000C1800100400300600600300400100C00180 +C00180C00180C00180C0018040010060030021C2003226000E380003F080001080001080 +001980001F00000E0011167E9017>II<1F2060E0006080208020800060003E001F8001C00060002080208020C040E0C09F00 +0B117E9011>I<7FFF8060C18040C080C0C0C080C04080C04000C00000C00000C00000C0 +0000C00000C00000C00000C00000C00000C0000FFC0012117F9016>II +I<3E006300018001800F8031804180C190C19063903DE00C0B7F8A0F>97 +DI<1F8030C06000C000C000C000C000C000604030801F000A0B7F8A0E>I<01E0 +006000600060006000600F6030E06060C060C060C060C060C060606030E01F780D117F90 +11>I<1F00318060C0C0C0FFC0C000C000C000604030801F000A0B7F8A0E>I<07000D8018 +00180018001800FF001800180018001800180018001800180018007E00091180900A>I< +1EF0331061806180618033003E00400060003F803FC060E0C060C060C06060C01F000C11 +7F8A0F>II<20702000000000F0303030303030303030FC0612809108>I107 DIII<1F00318060C0C060C060C0 +60C060C06060C031801F000B0B7F8A0F>II<0F2030E06060C060C060C060C060C0 +60606030E01F60006000600060006001F80D107F8A10>II<3D038181783E038181C2BC080B7F8A0C>I<0800 +080018003800FF001800180018001800180018801880188018800F00090F808E0C>IIIIIII E /Fe 34 123 df<00FF0003C1800783C00F03C00F03 +C00F01800F00000F0000FFFFC0FFFFC00F03C00F03C00F03C00F03C00F03C00F03C00F03 +C00F03C00F03C00F03C00F03C03F87F03F87F01417809617>12 D<007F8103FFF707E03F 
+0F000F1E00073C00077C0003780003F80000F80000F80000F80000F80000F80000F80000 +7800037C00033C00031E00060F000C07E03803FFF0007FC018177E961D>67 +DII<003FC080 +01FFF38007E01F800F8007801E0003803C0003807C0001807C000180F8000000F8000000 +F8000000F8000000F8000000F801FFF0F801FFF07C000F807C000F803C000F801E000F80 +0F800F8007E01F8001FFFF80003FE1801C177E9620>71 D76 +DII80 D82 D<07F0801FFD80380F80700380F00380F00180F00180F80000FF80007FF8003FFE00 +1FFF0007FF80007F80000FC00007C0C003C0C003C0C003C0E00780F80F00DFFE0087F800 +12177E9617>I86 +D<0FE0003838003C1C003C1E00181E00001E0000FE000F9E003C1E00781E00F01E00F01E +00F01E00786FC01F87C0120F7F8E14>97 DI<07F81C0E381E781E700CF000F000 +F000F000F0007000780038061C0C07F80F0F7F8E12>I<001F80001F8000078000078000 +078000078000078000078003F7801E1F80380780780780700780F00780F00780F00780F0 +0780F00780700780780780380F801E1FE007E7E013177F9617>I<07F01C18380C780670 +07F007F007FFFFF000F0007000780038031E0603FC100F7F8E13>I<01F007180E3C1E3C +1E181E001E001E00FFC0FFC01E001E001E001E001E001E001E001E001E001E001E007F80 +7F800E1780960C>I<0FE700183B80301B80701C00701C00701C00701C00301800183000 +2FE0002000003000003FF8003FFE003FFF00700780E00380E00380E00380700700380E00 +0FF80011167F8E14>I<387C7C7C3800000000FCFC3C3C3C3C3C3C3C3C3C3C3CFFFF0818 +7F970B>105 D +108 DII<07F0001C1C00380E00700700700700F00780F00780F0 +0780F00780F00780700700700700380E001C1C0007F000110F7F8E14>II<07E1801E1B80380F +80780780700780F00780F00780F00780F00780F00780700780780780380F801C1F8007E7 +80000780000780000780000780001FE0001FE013157F8E15>II<1FF060704030C030E0 +00FF007FE03FF00FF80078C018C018E010F020CFC00D0F7F8E10>I<0C000C000C000C00 +1C001C003FE0FFE03C003C003C003C003C003C003C003C303C303C303C301E6007C00C15 +7F9410>II +I120 DI<7FFC787870F860F061E063E003C007800F001F +0C1E0C3C1C7C187838FFF80E0F7F8E12>I E /Ff 13 121 df68 +DI<00FF80 +6003FFF0E00FFFF8E01F80FDE03F001FE03E0007E07C0003E07C0003E0FC0001E0FC0001 +E0FC0000E0FE0000E0FE0000E0FF000000FFC000007FFC00007FFFE0003FFFF8001FFFFE 
+001FFFFF0007FFFF8003FFFFC000FFFFC0000FFFE000007FE000001FF000000FF0000007 +F0E00003F0E00003F0E00003F0E00003F0F00003E0F00003E0F80007E0FC0007C0FF000F +80FFE03F80E3FFFE00E1FFFC00C01FF0001C297CA825>83 D<003FC00001FFF00003E07C +000F803E001F801F001F001F003F000F807E000F807E000FC07E000FC0FE0007C0FE0007 +C0FFFFFFC0FFFFFFC0FE000000FE000000FE0000007E0000007E0000007F0000003F0001 +C01F0001C00F80038007C0070003F01E0000FFFC00003FE0001A1B7E9A1F>101 +D<07000F801FC03FE03FE03FE01FC00F8007000000000000000000000000000000FFE0FF +E0FFE00FE00FE00FE00FE00FE00FE00FE00FE00FE00FE00FE00FE00FE00FE00FE00FE00F +E00FE00FE00FE00FE0FFFEFFFEFFFE0F2B7EAA12>105 D108 DI112 D114 +D<03FE300FFFF03E03F07800F07000F0F00070F00070F80070FE0000FFE0007FFF007FFF +C03FFFE01FFFF007FFF800FFF80007FC0000FCE0007CE0003CF0003CF00038F80038FC00 +70FF01E0E7FFC0C1FF00161B7E9A1B>I<00E00000E00000E00000E00001E00001E00001 +E00003E00003E00007E0000FE0001FFFE0FFFFE0FFFFE00FE0000FE0000FE0000FE0000F +E0000FE0000FE0000FE0000FE0000FE0000FE0000FE0000FE0000FE0700FE0700FE0700F +E0700FE0700FE0700FE07007F0E003F0C001FF80007F0014267FA51A>I +118 D120 D E /Fg 8 116 df<387CFEFEFE7C3807077C8610>46 +D<00FE0007FFC00F83E01F01F03E00F83E00F87C007C7C007C7C007CFC007CFC007EFC00 +7EFC007EFC007EFC007EFC007EFC007EFC007EFC007EFC007EFC007EFC007EFC007E7C00 +7C7C007C7C007C3E00F83E00F81F01F00F83E007FFC000FE0017207E9F1C>48 +D<00FE0007FFC00F07E01E03F03F03F03F81F83F81F83F81F81F03F81F03F00003F00003 +E00007C0001F8001FE0001FF000007C00001F00001F80000FC0000FC3C00FE7E00FEFF00 +FEFF00FEFF00FEFF00FC7E01FC7801F81E07F00FFFC001FE0017207E9F1C>51 +D82 +D<0FFC003FFF807E07C07E03E07E01E07E01F03C01F00001F00001F0003FF003FDF01FC1 +F03F01F07E01F0FC01F0FC01F0FC01F0FC01F07E02F07E0CF81FF87F07E03F18167E951B +>97 D<00FE0007FF800F87C01E01E03E01F07C00F07C00F8FC00F8FC00F8FFFFF8FFFFF8 +FC0000FC0000FC00007C00007C00007E00003E00181F00300FC07003FFC000FF0015167E +951A>101 D108 D<0FF3003FFF00781F00600700E00300E00300F00300FC00 +007FE0007FF8003FFE000FFF0001FF00000F80C00780C00380E00380E00380F00700FC0E 
+00EFFC00C7F00011167E9516>115 D E /Fh 23 119 df<0000000007C0000000000000 +00000FE000000000000000000FE000000000000000001FF000000000000000001FF00000 +0000000000001FF000000000000000003FF800000000000000003FF80000000000000000 +7FFC00000000000000007FFC00000000000000007FFC0000000000000000FFFE00000000 +00000000FFFE0000000000000001FFFF0000000000000001FFFF0000000000000001FFFF +0000000000000003FFFF8000000000000003FFFF8000000000000007FFFFC00000000000 +0007DFFFC000000000000007CFFFC00000000000000FCFFFE00000000000000F87FFE000 +00000000001F87FFF00000000000001F07FFF00000000000001F03FFF00000000000003F +03FFF80000000000003E01FFF80000000000007E01FFFC0000000000007C01FFFC000000 +0000007C00FFFC000000000000FC00FFFE000000000000F8007FFE000000000001F8007F +FF000000000001F0007FFF000000000001F0003FFF000000000003F0003FFF8000000000 +03E0001FFF800000000007E0001FFFC00000000007C0001FFFC00000000007C0000FFFC0 +000000000FC0000FFFE0000000000F800007FFE0000000001F800007FFF0000000001F00 +0007FFF0000000001F000003FFF0000000003F000003FFF8000000003E000001FFF80000 +00007FFFFFFFFFFC000000007FFFFFFFFFFC000000007FFFFFFFFFFC00000000FFFFFFFF +FFFE00000000F80000007FFE00000001F80000007FFF00000001F00000003FFF00000001 +F00000003FFF00000003F00000003FFF80000003E00000001FFF80000007E00000001FFF +C0000007C00000000FFFC0000007C00000000FFFC000000F800000000FFFE000000F8000 +000007FFE000001F8000000007FFF000001F0000000003FFF000001F0000000003FFF000 +003E0000000003FFF80000FFC000000001FFF800FFFFFF800007FFFFFFFEFFFFFF800007 +FFFFFFFEFFFFFF800007FFFFFFFEFFFFFF800007FFFFFFFE4F487CC758>65 +D<000000007FFE0000060000000FFFFFC0001E0000007FFFFFF8003E000003FFFFFFFE00 +7E00000FFFF800FF80FE00003FFF80001FC1FE0000FFFC000007F3FE0001FFF0000001FF +FE0007FFC00000007FFE000FFF000000003FFE001FFE000000001FFE003FFC000000000F +FE007FF80000000007FE00FFF00000000007FE01FFF00000000003FE01FFE00000000001 +FE03FFE00000000001FE07FFC00000000000FE07FFC00000000000FE0FFF800000000000 +FE0FFF8000000000007E1FFF8000000000007E1FFF0000000000007E3FFF000000000000 
+3E3FFF0000000000003E3FFF0000000000003E7FFF0000000000003E7FFE000000000000 +007FFE000000000000007FFE00000000000000FFFE00000000000000FFFE000000000000 +00FFFE00000000000000FFFE00000000000000FFFE00000000000000FFFE000000000000 +00FFFE00000000000000FFFE00000000000000FFFE00000000000000FFFE000000000000 +00FFFE00000000000000FFFE00000000000000FFFE000000000000007FFE000000000000 +007FFE000000000000007FFE000000000000007FFF000000000000003FFF000000000000 +1E3FFF0000000000001E3FFF0000000000001E1FFF0000000000001E1FFF800000000000 +1E0FFF8000000000003E0FFF8000000000003C07FFC000000000003C07FFC00000000000 +3C03FFE000000000007801FFE000000000007801FFF00000000000F000FFF00000000001 +F0007FF80000000001E0003FFC0000000003C0001FFE000000000780000FFF800000000F +000007FFC00000003E000001FFF00000007C000000FFFC000001F80000003FFF80000FF0 +0000000FFFF8007FC000000003FFFFFFFF00000000007FFFFFFC00000000000FFFFFE000 +00000000007FFE00000047497AC754>67 DI<000000007FFC00000C000000000FFFFFC0003C0000 +0000FFFFFFF0007C00000003FFFFFFFC00FC0000001FFFF001FF01FC0000003FFF00003F +C3FC000000FFF8000007E7FC000003FFE0000003FFFC000007FFC0000000FFFC00000FFF +000000007FFC00001FFE000000003FFC00003FFC000000001FFC00007FF8000000000FFC +0000FFF0000000000FFC0001FFF00000000007FC0003FFE00000000003FC0003FFE00000 +000003FC0007FFC00000000001FC0007FFC00000000001FC000FFF800000000001FC001F +FF800000000000FC001FFF800000000000FC001FFF000000000000FC003FFF0000000000 +007C003FFF0000000000007C003FFF0000000000007C007FFF0000000000007C007FFE00 +000000000000007FFE00000000000000007FFE0000000000000000FFFE00000000000000 +00FFFE0000000000000000FFFE0000000000000000FFFE0000000000000000FFFE000000 +0000000000FFFE0000000000000000FFFE0000000000000000FFFE0000000000000000FF +FE0000000000000000FFFE0000000000000000FFFE0000000000000000FFFE0000000000 +000000FFFE00000000000000007FFE0000003FFFFFFFFC7FFE0000003FFFFFFFFC7FFE00 +00003FFFFFFFFC7FFF0000003FFFFFFFFC3FFF0000000000FFFC003FFF0000000000FFFC 
+003FFF0000000000FFFC001FFF0000000000FFFC001FFF8000000000FFFC001FFF800000 +0000FFFC000FFF8000000000FFFC0007FFC000000000FFFC0007FFC000000000FFFC0003 +FFE000000000FFFC0003FFE000000000FFFC0001FFF000000000FFFC0000FFF000000000 +FFFC00007FF800000000FFFC00003FFC00000000FFFC00001FFE00000000FFFC00000FFF +00000001FFFC000007FFC0000001FFFC000003FFF0000003FFFC000000FFFC00000FCFFC +0000003FFF80001F87FC0000001FFFF800FF03FC00000003FFFFFFFE00FC00000000FFFF +FFF8003C000000000FFFFFE0000C00000000007FFE000000004E497AC75B>71 +D75 +DI< +FFFFFFFFFFFF000000FFFFFFFFFFFFF80000FFFFFFFFFFFFFE0000FFFFFFFFFFFFFFC000 +001FFF80001FFFE000001FFF800001FFF800001FFF800000FFFC00001FFF8000003FFE00 +001FFF8000001FFF00001FFF8000001FFF80001FFF8000000FFF80001FFF8000000FFFC0 +001FFF80000007FFC0001FFF80000007FFE0001FFF80000007FFE0001FFF80000007FFF0 +001FFF80000007FFF0001FFF80000007FFF0001FFF80000007FFF0001FFF80000007FFF0 +001FFF80000007FFF0001FFF80000007FFF0001FFF80000007FFF0001FFF80000007FFE0 +001FFF80000007FFE0001FFF80000007FFE0001FFF80000007FFC0001FFF8000000FFFC0 +001FFF8000000FFF80001FFF8000001FFF80001FFF8000001FFF00001FFF8000003FFE00 +001FFF800000FFFC00001FFF800003FFF000001FFF80001FFFE000001FFFFFFFFFFF8000 +001FFFFFFFFFFC0000001FFFFFFFFFE00000001FFF800000000000001FFF800000000000 +001FFF800000000000001FFF800000000000001FFF800000000000001FFF800000000000 +001FFF800000000000001FFF800000000000001FFF800000000000001FFF800000000000 +001FFF800000000000001FFF800000000000001FFF800000000000001FFF800000000000 +001FFF800000000000001FFF800000000000001FFF800000000000001FFF800000000000 +001FFF800000000000001FFF800000000000001FFF800000000000001FFF800000000000 +001FFF800000000000001FFF800000000000001FFF800000000000001FFF800000000000 +001FFF800000000000001FFF800000000000001FFF800000000000FFFFFFFFF000000000 +FFFFFFFFF000000000FFFFFFFFF000000000FFFFFFFFF00000000044477CC64F>80 +D<00000003FFF0000000000000007FFFFF80000000000003FFFFFFF000000000000FFF80 +7FFC00000000003FF80007FF0000000000FFE00001FFC000000003FFC00000FFF0000000 
+07FF0000003FF80000000FFE0000001FFC0000001FFC0000000FFE0000003FF800000007 +FF0000007FF800000007FF800000FFF000000003FFC00001FFE000000001FFE00003FFE0 +00000001FFF00003FFC000000000FFF00007FFC000000000FFF80007FFC000000000FFF8 +000FFF80000000007FFC000FFF80000000007FFC001FFF80000000007FFE001FFF000000 +00003FFE003FFF00000000003FFF003FFF00000000003FFF003FFF00000000003FFF007F +FF00000000003FFF807FFF00000000003FFF807FFE00000000001FFF807FFE0000000000 +1FFF807FFE00000000001FFF80FFFE00000000001FFFC0FFFE00000000001FFFC0FFFE00 +000000001FFFC0FFFE00000000001FFFC0FFFE00000000001FFFC0FFFE00000000001FFF +C0FFFE00000000001FFFC0FFFE00000000001FFFC0FFFE00000000001FFFC0FFFE000000 +00001FFFC0FFFE00000000001FFFC0FFFE00000000001FFFC0FFFE00000000001FFFC0FF +FE00000000001FFFC07FFE00000000001FFF807FFE00000000001FFF807FFE0000000000 +1FFF807FFF00000000003FFF803FFF00000000003FFF003FFF00000000003FFF003FFF00 +000000003FFF001FFF00000000003FFE001FFF80000000007FFE001FFF80000000007FFE +000FFF80000000007FFC000FFFC000000000FFFC0007FFC0003F0000FFF80003FFC000FF +C000FFF00003FFE001FFF001FFF00001FFE003C0F801FFE00000FFF007803C03FFC00000 +7FF807001E07FF8000007FF80E000F07FF8000003FFC0E00070FFF0000000FFE0E00079F +FC00000007FF0E0007FFF800000003FFCE0003FFF000000000FFE70003FFC0000000007F +FF8007FF80000000000FFFC07FFC000000000003FFFFFFF80000400000007FFFFFFC0000 +E000000003FFF1FC0000E0000000000000FE0000E0000000000000FF0001E00000000000 +00FF8003E0000000000000FFE01FE0000000000000FFFFFFE00000000000007FFFFFC000 +00000000007FFFFFC00000000000007FFFFFC00000000000003FFFFF800000000000003F +FFFF800000000000003FFFFF800000000000001FFFFF000000000000001FFFFF00000000 +0000000FFFFE000000000000000FFFFC0000000000000007FFF80000000000000003FFF0 +0000000000000000FFE000000000000000003F00004B5C7AC757>II<00 +001FFFC0000001FFFFFC000007FFFFFF00001FF8007F80007FE000FFC000FF8000FFC001 +FF0001FFE003FE0001FFE007FE0001FFE00FFC0001FFE00FFC0001FFE01FF80000FFC03F +F800007F803FF800003F007FF8000000007FF0000000007FF0000000007FF000000000FF 
+F000000000FFF000000000FFF000000000FFF000000000FFF000000000FFF000000000FF +F000000000FFF000000000FFF000000000FFF0000000007FF0000000007FF0000000007F +F8000000007FF8000000003FF8000000003FF8000000001FFC000000F00FFC000000F00F +FE000001E007FE000001E003FF000003C001FF8000078000FFC0000F00007FF0003E0000 +1FFC01FC000007FFFFF0000001FFFFC00000001FFC00002C2E7CAD34>99 +D<0000000000FF8000000001FFFF8000000001FFFF8000000001FFFF8000000001FFFF80 +0000000007FF800000000003FF800000000003FF800000000003FF800000000003FF8000 +00000003FF800000000003FF800000000003FF800000000003FF800000000003FF800000 +000003FF800000000003FF800000000003FF800000000003FF800000000003FF80000000 +0003FF800000000003FF800000000003FF800000000003FF800000000003FF8000000000 +03FF8000001FFC03FF800001FFFF83FF800007FFFFE3FF80001FFC03F3FF80007FE0007F +FF8000FF80003FFF8001FF00000FFF8003FE000007FF8007FE000003FF800FFC000003FF +800FFC000003FF801FF8000003FF803FF8000003FF803FF8000003FF803FF0000003FF80 +7FF0000003FF807FF0000003FF807FF0000003FF80FFF0000003FF80FFF0000003FF80FF +F0000003FF80FFF0000003FF80FFF0000003FF80FFF0000003FF80FFF0000003FF80FFF0 +000003FF80FFF0000003FF80FFF0000003FF807FF0000003FF807FF0000003FF807FF000 +0003FF807FF0000003FF803FF8000003FF803FF8000003FF801FF8000003FF801FF80000 +03FF800FFC000003FF8007FC000007FF8003FE00000FFF8001FF00001FFF8000FF80007F +FF80007FC000FBFFC0003FF807E3FFFF000FFFFFC3FFFF0001FFFF03FFFF00003FF003FF +FF38487CC741>I<00001FFC00000001FFFFC0000007FFFFF000001FF80FFC00007FC001 +FE0000FF8000FF0001FF00007F8003FE00003FC007FC00001FE00FFC00001FE01FF80000 +1FF01FF800000FF03FF800000FF83FF800000FF83FF000000FF87FF0000007F87FF00000 +07FC7FF0000007FCFFF0000007FCFFF0000007FCFFFFFFFFFFFCFFFFFFFFFFFCFFFFFFFF +FFFCFFF000000000FFF000000000FFF000000000FFF000000000FFF0000000007FF00000 +00007FF0000000007FF0000000003FF8000000003FF8000000001FF80000003C1FF80000 +003C0FFC0000003C0FFC0000007807FE000000F803FF000001F001FF800003E000FFC000 +07C0003FE0001F80001FFC01FF000007FFFFFC000000FFFFE00000000FFF00002E2E7DAD 
+35>I<000000FFC00000001FFFF00000007FFFFC000001FFC0FE000003FF01FF000007FC +03FF00000FF807FF80001FF807FF80003FF007FF80007FF007FF80007FE007FF8000FFE0 +03FF0000FFE001FE0000FFE000FC0000FFE000300000FFE000000000FFE000000000FFE0 +00000000FFE000000000FFE000000000FFE000000000FFE000000000FFE000000000FFE0 +00000000FFE000000000FFE0000000FFFFFFFE0000FFFFFFFE0000FFFFFFFE0000FFFFFF +FE000000FFE000000000FFE000000000FFE000000000FFE000000000FFE000000000FFE0 +00000000FFE000000000FFE000000000FFE000000000FFE000000000FFE000000000FFE0 +00000000FFE000000000FFE000000000FFE000000000FFE000000000FFE000000000FFE0 +00000000FFE000000000FFE000000000FFE000000000FFE000000000FFE000000000FFE0 +00000000FFE000000000FFE000000000FFE000000000FFE000000000FFE000000000FFE0 +00000000FFE000000000FFE000000000FFE000000000FFE000000000FFE000000000FFE0 +00000000FFE000000000FFE00000007FFFFFF000007FFFFFF000007FFFFFF000007FFFFF +F0000029487DC724>I<007FC000000000FFFFC000000000FFFFC000000000FFFFC00000 +0000FFFFC00000000003FFC00000000001FFC00000000001FFC00000000001FFC0000000 +0001FFC00000000001FFC00000000001FFC00000000001FFC00000000001FFC000000000 +01FFC00000000001FFC00000000001FFC00000000001FFC00000000001FFC00000000001 +FFC00000000001FFC00000000001FFC00000000001FFC00000000001FFC00000000001FF +C00000000001FFC00000000001FFC007FE000001FFC03FFFC00001FFC07FFFF00001FFC1 +F81FF80001FFC3C00FFC0001FFC70007FE0001FFCE0007FE0001FFDC0007FF0001FFF800 +03FF0001FFF80003FF8001FFF00003FF8001FFF00003FF8001FFE00003FF8001FFE00003 +FF8001FFE00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF +8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF80 +01FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001 +FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FF +C00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC0 +0003FF8001FFC00003FF8001FFC00003FF80FFFFFF81FFFFFFFFFFFF81FFFFFFFFFFFF81 +FFFFFFFFFFFF81FFFFFF38487BC741>104 D<007C0001FF0003FF8007FFC007FFC00FFF 
+E00FFFE00FFFE00FFFE00FFFE007FFC007FFC003FF8001FF00007C000000000000000000 +00000000000000000000000000000000000000000000000000000000007FC0FFFFC0FFFF +C0FFFFC0FFFFC003FFC001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FF +C001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FF +C001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FFC001FF +C001FFC001FFC001FFC0FFFFFFFFFFFFFFFFFFFFFFFF18497CC820>I<007FC000000000 +FFFFC000000000FFFFC000000000FFFFC000000000FFFFC00000000003FFC00000000001 +FFC00000000001FFC00000000001FFC00000000001FFC00000000001FFC00000000001FF +C00000000001FFC00000000001FFC00000000001FFC00000000001FFC00000000001FFC0 +0000000001FFC00000000001FFC00000000001FFC00000000001FFC00000000001FFC000 +00000001FFC00000000001FFC00000000001FFC00000000001FFC00000000001FFC0003F +FFF001FFC0003FFFF001FFC0003FFFF001FFC0003FFFF001FFC00007FE0001FFC00003F0 +0001FFC00007E00001FFC0001F800001FFC0003F000001FFC0007E000001FFC000FC0000 +01FFC001F8000001FFC007E0000001FFC00FC0000001FFC01F80000001FFC03F00000001 +FFC07E00000001FFC1FE00000001FFC3FF00000001FFC7FF00000001FFCFFF80000001FF +DFFFC0000001FFFFFFE0000001FFFDFFE0000001FFF8FFF0000001FFF07FF8000001FFC0 +7FFC000001FF803FFC000001FF801FFE000001FF800FFF000001FF800FFF800001FF8007 +FF800001FF8003FFC00001FF8001FFE00001FF8001FFF00001FF8000FFF00001FF80007F +F80001FF80003FFC0001FF80003FFE0001FF80001FFE0001FF80000FFF0001FF80000FFF +C0FFFFFF00FFFFFEFFFFFF00FFFFFEFFFFFF00FFFFFEFFFFFF00FFFFFE37487DC73D> +107 D<00FF8007FE0000FFFF803FFFC000FFFF807FFFF000FFFF81F81FF800FFFF83C00F +FC0003FF870007FE0001FF8E0007FE0001FF9C0007FF0001FFB80003FF0001FFB80003FF +8001FFF00003FF8001FFF00003FF8001FFE00003FF8001FFE00003FF8001FFE00003FF80 +01FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001 +FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FF +C00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC0 +0003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC000 
+03FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003 +FF8001FFC00003FF80FFFFFF81FFFFFFFFFFFF81FFFFFFFFFFFF81FFFFFFFFFFFF81FFFF +FF382E7BAD41>110 D<00000FFE0000000001FFFFF000000007FFFFFC0000001FF803FF +0000007FE000FFC00000FF80003FE00001FF00001FF00003FE00000FF80007FC000007FC +000FFC000007FE000FF8000003FE001FF8000003FF003FF8000003FF803FF0000001FF80 +3FF0000001FF807FF0000001FFC07FF0000001FFC07FF0000001FFC0FFF0000001FFE0FF +F0000001FFE0FFF0000001FFE0FFF0000001FFE0FFF0000001FFE0FFF0000001FFE0FFF0 +000001FFE0FFF0000001FFE0FFF0000001FFE0FFF0000001FFE07FF0000001FFC07FF000 +0001FFC07FF0000001FFC07FF0000001FFC03FF8000003FF803FF8000003FF801FF80000 +03FF001FFC000007FF000FFC000007FE0007FE00000FFC0003FE00000FF80001FF00001F +F00000FF80003FE000007FE000FFC000001FFC07FF00000007FFFFFC00000001FFFFF000 +0000001FFF000000332E7DAD3A>I<007F803F80FFFF80FFE0FFFF81FFF8FFFF83C3FCFF +FF8707FE03FF8E07FE01FF9C0FFF01FFB80FFF01FFB80FFF01FFF00FFF01FFF00FFF01FF +F007FE01FFE003FC01FFE001F801FFE0000001FFE0000001FFC0000001FFC0000001FFC0 +000001FFC0000001FFC0000001FFC0000001FFC0000001FFC0000001FFC0000001FFC000 +0001FFC0000001FFC0000001FFC0000001FFC0000001FFC0000001FFC0000001FFC00000 +01FFC0000001FFC0000001FFC0000001FFC0000001FFC0000001FFC0000001FFC0000001 +FFC0000001FFC00000FFFFFFE000FFFFFFE000FFFFFFE000FFFFFFE000282E7DAD2F> +114 D<000FFE01C000FFFFC7C003FFFFFFC007F801FFC00FC0007FC01F00001FC03F0000 +0FC07E000007C07E000007C07E000003C0FE000003C0FE000003C0FF000003C0FF800003 +C0FFE0000000FFF80000007FFFC000007FFFFF00003FFFFFE0003FFFFFF8001FFFFFFC00 +0FFFFFFF0007FFFFFF8001FFFFFFC0007FFFFFE0001FFFFFE00000FFFFF0000003FFF000 +00007FF87000001FF8F000000FF8F0000007F8F8000007F8F8000003F8F8000003F8FC00 +0003F0FC000003F0FE000003F0FF000007E0FF800007E0FFC0000FC0FFF0001F80FEFC00 +FF00F87FFFFC00F01FFFF000E003FF8000252E7CAD2E>I<0001E000000001E000000001 +E000000001E000000001E000000001E000000003E000000003E000000003E000000007E0 +00000007E000000007E00000000FE00000000FE00000001FE00000003FE00000003FE000 
+00007FE0000001FFE0000003FFE000000FFFFFFFC0FFFFFFFFC0FFFFFFFFC0FFFFFFFFC0 +00FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000 +FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FF +E0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0000000FFE0 +000000FFE0000000FFE0000000FFE000F000FFE000F000FFE000F000FFE000F000FFE000 +F000FFE000F000FFE000F000FFE000F000FFE000F0007FE000E0007FF001E0003FF001E0 +003FF003C0001FF80380000FFE0F000007FFFE000001FFFC0000001FF00024427EC12D> +I<007FC00000FF80FFFFC001FFFF80FFFFC001FFFF80FFFFC001FFFF80FFFFC001FFFF80 +03FFC00007FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001 +FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FF +C00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC0 +0003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC000 +03FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00003 +FF8001FFC00003FF8001FFC00003FF8001FFC00003FF8001FFC00007FF8001FFC00007FF +8001FFC00007FF8000FFC0000FFF8000FFC0001FFF8000FFC0001FFF80007FE0003BFF80 +003FE000F3FFC0001FF803E3FFFF000FFFFF83FFFF0003FFFF03FFFF00003FF803FFFF38 +2E7BAD41>II E end +%%EndProlog +%%BeginSetup +%%Feature: *Resolution 300dpi +TeXDict begin +%%PaperSize: Letter + @landscape +%%EndSetup +%%Page: 1 1 +1 0 bop -180 -75 a Fh(LAP)-10 b(A)m(CK)39 b(Quic)m(k)-180 +167 y(Reference)f(Guide)-180 398 y(to)h(the)-180 628 +y(Driv)m(er)h(Routines)-180 879 y Fg(Release)17 b(3.0)801 +102 y Ff(Simple)24 b(Driv)n(ers)1166 137 y Fe(Simple)11 +b(Driv)o(er)j(Routines)f(for)h(Linear)g(Equations)801 +210 y Fd(Matrix)e(T)o(yp)q(e)338 b(Routine)p 776 220 +1580 2 v 801 241 a(General)406 b Fc(SGESV\()94 b(N,)119 +b(NRHS,)15 b(A,)e(LDA,)41 b(IPIV,)14 b(B,)g(LDB,)186 +b(INFO)14 b(\))1310 270 y(CGESV\()94 b(N,)119 b(NRHS,)15 +b(A,)e(LDA,)41 b(IPIV,)14 b(B,)g(LDB,)186 b(INFO)14 b(\))801 +328 y Fd(General)d(Band)325 b Fc(SGBSV\()94 b(N,)13 b(KL,)h(KU,)g 
+(NRHS,)h(AB,)f(LDAB,)g(IPIV,)g(B,)g(LDB,)186 b(INFO)14 +b(\))1310 357 y(CGBSV\()94 b(N,)13 b(KL,)h(KU,)g(NRHS,)h(AB,)f(LDAB,)g +(IPIV,)g(B,)g(LDB,)186 b(INFO)14 b(\))801 415 y Fd(General)d(T)n +(ridiagonal)245 b Fc(SGTSV\()94 b(N,)119 b(NRHS,)15 b(DL,)f(D,)f(DU,)80 +b(B,)14 b(LDB,)186 b(INFO)14 b(\))1310 444 y(CGTSV\()94 +b(N,)119 b(NRHS,)15 b(DL,)f(D,)f(DU,)80 b(B,)14 b(LDB,)186 +b(INFO)14 b(\))801 502 y Fd(Symmetric/Hermitian)215 b +Fc(SPOSV\()14 b(UPLO,)h(N,)119 b(NRHS,)15 b(A,)e(LDA,)120 +b(B,)14 b(LDB,)186 b(INFO)14 b(\))801 531 y Fd(P)o(ositiv)o(e)f +(De\014nite)287 b Fc(CPOSV\()14 b(UPLO,)h(N,)119 b(NRHS,)15 +b(A,)e(LDA,)120 b(B,)14 b(LDB,)186 b(INFO)14 b(\))801 +589 y Fd(Symmetric/Hermitian)215 b Fc(SPPSV\()14 b(UPLO,)h(N,)119 +b(NRHS,)15 b(AP,)172 b(B,)14 b(LDB,)186 b(INFO)14 b(\))801 +618 y Fd(P)o(ositiv)o(e)f(De\014nite)c(\(P)o(ac)o(k)o(ed)i(Storage\))50 +b Fc(CPPSV\()14 b(UPLO,)h(N,)119 b(NRHS,)15 b(AP,)172 +b(B,)14 b(LDB,)186 b(INFO)14 b(\))801 677 y Fd(Symmetric/Hermitian)215 +b Fc(SPBSV\()14 b(UPLO,)h(N,)e(KD,)67 b(NRHS,)15 b(AB,)f(LDAB,)93 +b(B,)14 b(LDB,)186 b(INFO)14 b(\))801 706 y Fd(P)o(ositiv)o(e)f +(De\014nite)c(Band)208 b Fc(CPBSV\()14 b(UPLO,)h(N,)e(KD,)67 +b(NRHS,)15 b(AB,)f(LDAB,)93 b(B,)14 b(LDB,)186 b(INFO)14 +b(\))801 764 y Fd(Symmetric/Hermitian)215 b Fc(SPTSV\()94 +b(N,)119 b(NRHS,)15 b(D,)e(E,)146 b(B,)14 b(LDB,)186 +b(INFO)14 b(\))801 793 y Fd(P)o(ositiv)o(e)f(De\014nite)c(T)n +(ridiagonal)128 b Fc(CPTSV\()94 b(N,)119 b(NRHS,)15 b(D,)e(E,)146 +b(B,)14 b(LDB,)186 b(INFO)14 b(\))801 851 y Fd(Symmetric/Hermitian)215 +b Fc(SSYSV\()14 b(UPLO,)h(N,)119 b(NRHS,)15 b(A,)e(LDA,)41 +b(IPIV,)14 b(B,)g(LDB,)g(WORK,)h(LWORK,)f(INFO)g(\))801 +880 y Fd(Inde\014nite)381 b Fc(CSYSV\()14 b(UPLO,)h(N,)119 +b(NRHS,)15 b(A,)e(LDA,)41 b(IPIV,)14 b(B,)g(LDB,)g(WORK,)h(LWORK,)f +(INFO)g(\))1310 909 y(CHESV\()g(UPLO,)h(N,)119 b(NRHS,)15 +b(A,)e(LDA,)41 b(IPIV,)14 b(B,)g(LDB,)g(WORK,)h(LWORK,)f(INFO)g(\))801 +967 y Fd(Symmetric/Hermitian)215 b Fc(SSPSV\()14 
b(UPLO,)h(N,)119 +b(NRHS,)15 b(AP,)93 b(IPIV,)14 b(B,)g(LDB,)186 b(INFO)14 +b(\))801 996 y Fd(Inde\014nite)9 b(\(P)o(ac)o(k)o(ed)i(Storage\))144 +b Fc(CSPSV\()14 b(UPLO,)h(N,)119 b(NRHS,)15 b(AP,)93 +b(IPIV,)14 b(B,)g(LDB,)186 b(INFO)14 b(\))1310 1025 y(CHPSV\()g(UPLO,)h +(N,)119 b(NRHS,)15 b(AP,)93 b(IPIV,)14 b(B,)g(LDB,)186 +b(INFO)14 b(\))801 1115 y Fe(Simple)e(Driv)o(er)h(Routines)h(for)g +(Standard)d(and)i(Generalized)e(Linear)j(Least)f(Squares)f(Problems)801 +1188 y Fd(Problem)e(T)o(yp)q(e)318 b(Routine)p 776 1199 +V 801 1219 a(Solv)o(e)11 b(Using)h(Orthogonal)g(F)n(actor,)87 +b Fc(SGELS\()14 b(TRANS,)h(M,)f(N,)f(NRHS,)i(A,)e(LDA,)i(B,)e(LDB,)173 +b(WORK,)15 b(LWORK,)f(INFO)g(\))801 1248 y Fd(Assuming)d(F)n(ull)f +(Rank)238 b Fc(CGELS\()14 b(TRANS,)h(M,)f(N,)f(NRHS,)i(A,)e(LDA,)i(B,)e +(LDB,)173 b(WORK,)15 b(LWORK,)f(INFO)g(\))801 1306 y +Fd(Solv)o(e)d(LSE)f(Problem)g(Using)i(GR)o(Q)88 b Fc(SGGLSE\()94 +b(M,)14 b(N,)f(P,)54 b(A,)13 b(LDA,)i(B,)e(LDB,)h(C,)g(D,)g(X,)53 +b(WORK,)15 b(LWORK,)f(INFO)g(\))1310 1335 y(CGGLSE\()94 +b(M,)14 b(N,)f(P,)54 b(A,)13 b(LDA,)i(B,)e(LDB,)h(C,)g(D,)g(X,)53 +b(WORK,)15 b(LWORK,)f(INFO)g(\))801 1393 y Fd(Solv)o(e)d(GLM)g(Problem) +f(Using)i(GQR)73 b Fc(SGGGLM\()94 b(N,)14 b(M,)f(P,)54 +b(A,)13 b(LDA,)i(B,)e(LDB,)h(D,)g(X,)g(Y,)53 b(WORK,)15 +b(LWORK,)f(INFO)g(\))1310 1422 y(CGGGLM\()94 b(N,)14 +b(M,)f(P,)54 b(A,)13 b(LDA,)i(B,)e(LDB,)h(D,)g(X,)g(Y,)53 +b(WORK,)15 b(LWORK,)f(INFO)g(\))p eop +%%Page: 2 2 +2 1 bop -25 -123 a Fe(Simple)12 b(and)h(Divide)h(and)e(Conquer)i(Driv)o +(er)f(Routines)h(for)g(Standard)d(Eigen)o(v)n(alue)h(and)h(Singular)g +(V)m(alue)f(Problems)-205 -58 y Fd(Matrix/Problem)g(T)o(yp)q(e)125 +b(Routine)p -229 -47 2259 2 v -205 -27 a(Symmetric/Hermitian)k +Fc(SSYEV\()28 b(JOBZ,)14 b(UPLO,)134 b(N,)66 b(A,)14 +b(LDA,)94 b(W,)344 b(WORK,)14 b(LWORK,)505 b(INFO)14 +b(\))-205 2 y Fd(Eigen)o(v)n(alues/v)o(ectors)161 b Fc(CHEEV\()28 +b(JOBZ,)14 b(UPLO,)134 b(N,)66 b(A,)14 b(LDA,)94 b(W,)344 +b(WORK,)14 
b(LWORK,)h(RWORK,)412 b(INFO)14 b(\))-205 +31 y Fd(Divide)e(and)e(Conquer)153 b Fc(SSYEVD\()15 b(JOBZ,)f(UPLO,)134 +b(N,)66 b(A,)14 b(LDA,)94 b(W,)344 b(WORK,)14 b(LWORK,)214 +b(IWORK,)14 b(LIWORK,)108 b(INFO)14 b(\))218 60 y(CHEEVD\()h(JOBZ,)f +(UPLO,)134 b(N,)66 b(A,)14 b(LDA,)94 b(W,)344 b(WORK,)14 +b(LWORK,)h(RWORK,)g(LRWORK,)g(IWORK,)f(LIWORK,)108 b(INFO)14 +b(\))-205 118 y Fd(Symmetric/Hermitian)129 b Fc(SSPEV\()28 +b(JOBZ,)14 b(UPLO,)134 b(N,)66 b(AP,)147 b(W,)79 b(Z,)14 +b(LDZ,)173 b(WORK,)597 b(INFO)14 b(\))-205 147 y Fd(\(P)o(ac)o(k)o(ed)d +(Storage\))195 b Fc(CHPEV\()28 b(JOBZ,)14 b(UPLO,)134 +b(N,)66 b(AP,)147 b(W,)79 b(Z,)14 b(LDZ,)173 b(WORK,)107 +b(RWORK,)412 b(INFO)14 b(\))-205 176 y Fd(Eigen)o(v)n(alues/v)o(ectors) +-205 205 y(Divide)e(and)e(Conquer)153 b Fc(SSPEVD\()15 +b(JOBZ,)f(UPLO,)134 b(N,)66 b(AP,)147 b(W,)79 b(Z,)14 +b(LDZ,)173 b(WORK,)14 b(LWORK,)214 b(IWORK,)14 b(LIWORK,)108 +b(INFO)14 b(\))218 235 y(CHPEVD\()h(JOBZ,)f(UPLO,)134 +b(N,)66 b(AP,)147 b(W,)79 b(Z,)14 b(LDZ,)173 b(WORK,)14 +b(LWORK,)h(RWORK,)g(LRWORK,)g(IWORK,)f(LIWORK,)108 b(INFO)14 +b(\))-205 293 y Fd(Symmetric/Hermitian)9 b(Band)50 b +Fc(SSBEV\()28 b(JOBZ,)14 b(UPLO,)134 b(N,)13 b(KD,)h(AB,)g(LDAB,)68 +b(W,)79 b(Z,)14 b(LDZ,)173 b(WORK,)597 b(INFO)14 b(\))-205 +322 y Fd(Eigen)o(v)n(alues/v)o(ectors)161 b Fc(CHBEV\()28 +b(JOBZ,)14 b(UPLO,)134 b(N,)13 b(KD,)h(AB,)g(LDAB,)68 +b(W,)79 b(Z,)14 b(LDZ,)173 b(WORK,)107 b(RWORK,)412 b(INFO)14 +b(\))-205 351 y Fd(Divide)e(and)e(Conquer)153 b Fc(SSBEVD\()15 +b(JOBZ,)f(UPLO,)134 b(N,)13 b(KD,)h(AB,)g(LDAB,)68 b(W,)79 +b(Z,)14 b(LDZ,)173 b(WORK,)14 b(LWORK,)214 b(IWORK,)14 +b(LIWORK,)108 b(INFO)14 b(\))218 380 y(CHBEVD\()h(JOBZ,)f(UPLO,)134 +b(N,)13 b(KD,)h(AB,)g(LDAB,)68 b(W,)79 b(Z,)14 b(LDZ,)173 +b(WORK,)14 b(LWORK,)h(RWORK,)g(LRWORK,)g(IWORK,)f(LIWORK,)108 +b(INFO)14 b(\))-205 438 y Fd(Symmetric)8 b(T)n(ridiagonal)120 +b Fc(SSTEV\()28 b(JOBZ,)213 b(N,)66 b(D,)14 b(E,)225 +b(Z,)14 b(LDZ,)173 b(WORK,)597 b(INFO)14 b(\))-205 467 +y 
Fd(Eigen)o(v)n(alues/v)o(ectors)-205 496 y(Divide)e(and)e(Conquer)153 +b Fc(SSTEVD\()15 b(JOBZ,)213 b(N,)66 b(D,)14 b(E,)225 +b(Z,)14 b(LDZ,)173 b(WORK,)14 b(LWORK,)214 b(IWORK,)14 +b(LIWORK,)108 b(INFO)14 b(\))-205 554 y Fd(General)320 +b Fc(SGEES\()15 b(JOBVS,)f(SORT,)h(SELECT,)28 b(N,)66 +b(A,)14 b(LDA,)g(SDIM,)h(WR,)e(WI,)h(VS,)g(LDVS,)147 +b(WORK,)14 b(LWORK,)412 b(BWORK,)15 b(INFO)f(\))-205 +583 y Fd(Sc)o(h)o(ur)c(F)n(actorization)163 b Fc(CGEES\()15 +b(JOBVS,)f(SORT,)h(SELECT,)28 b(N,)66 b(A,)14 b(LDA,)g(SDIM,)h(W,)79 +b(VS,)14 b(LDVS,)147 b(WORK,)14 b(LWORK,)h(RWORK,)319 +b(BWORK,)15 b(INFO)f(\))-205 641 y Fd(General)320 b Fc(SGEEV\()15 +b(JOBVL,)f(JOBVR,)121 b(N,)66 b(A,)14 b(LDA,)94 b(WR,)13 +b(WI,)h(VL,)g(LDVL,)h(VR,)f(LDVR,)g(WORK,)g(LWORK,)505 +b(INFO)14 b(\))-205 670 y Fd(Eigen)o(v)n(alues/v)o(ectors)161 +b Fc(CGEEV\()15 b(JOBVL,)f(JOBVR,)121 b(N,)66 b(A,)14 +b(LDA,)94 b(W,)79 b(VL,)14 b(LDVL,)h(VR,)f(LDVR,)g(WORK,)g(LWORK,)h +(RWORK,)412 b(INFO)14 b(\))-205 728 y Fd(General)320 +b Fc(SGESVD\()15 b(JOBU,)f(JOBVT,)81 b(M,)14 b(N,)66 +b(A,)14 b(LDA,)94 b(S,)f(U,)27 b(LDU,)14 b(VT,)g(LDVT,)g(WORK,)g +(LWORK,)505 b(INFO)14 b(\))-205 758 y Fd(Singular)e(V)n(alues/V)n +(ectors)102 b Fc(CGESVD\()15 b(JOBU,)f(JOBVT,)81 b(M,)14 +b(N,)66 b(A,)14 b(LDA,)94 b(S,)f(U,)27 b(LDU,)14 b(VT,)g(LDVT,)g(WORK,) +g(LWORK,)h(RWORK,)412 b(INFO)14 b(\))-205 787 y Fd(Divide)e(and)e +(Conquer)153 b Fc(SGESDD\()15 b(JOBZ,)173 b(M,)14 b(N,)66 +b(A,)14 b(LDA,)94 b(S,)f(U,)27 b(LDU,)14 b(VT,)g(LDVT,)g(WORK,)g +(LWORK,)214 b(IWORK,)f(INFO)14 b(\))218 816 y(CGESDD\()h(JOBZ,)173 +b(M,)14 b(N,)66 b(A,)14 b(LDA,)94 b(S,)f(U,)27 b(LDU,)14 +b(VT,)g(LDVT,)g(WORK,)g(LWORK,)h(RWORK,)121 b(IWORK,)213 +b(INFO)14 b(\))84 926 y Fe(Simple)e(and)h(Divide)h(and)e(Conquer)i +(Driv)o(er)f(Routines)h(for)g(Generalized)e(Eigen)o(v)n(alue)g(and)h +(Singular)g(V)m(alue)f(Problems)-205 991 y Fd(Matrix/Problem)g(T)o(yp)q +(e)73 b(Routine)p -229 1002 2525 2 v -205 1022 a(Symmetric-de\014nite) +116 b 
Fc(SSYGV\()28 b(ITYPE,)15 b(JOBZ,)f(UPLO,)160 b(N,)14 +b(A,)f(LDA,)i(B,)e(LDB,)94 b(W,)583 b(WORK,)14 b(LWORK,)411 +b(INFO)15 b(\))-205 1051 y Fd(Eigen)o(v)n(alues/v)o(ectors)109 +b Fc(CHEGV\()28 b(ITYPE,)15 b(JOBZ,)f(UPLO,)160 b(N,)14 +b(A,)f(LDA,)i(B,)e(LDB,)94 b(W,)583 b(WORK,)14 b(LWORK,)h(RWORK,)318 +b(INFO)15 b(\))-205 1080 y Fd(Divide)d(and)e(Conquer)101 +b Fc(SSYGVD\()15 b(ITYPE,)g(JOBZ,)f(UPLO,)160 b(N,)14 +b(A,)f(LDA,)i(B,)e(LDB,)94 b(W,)583 b(WORK,)14 b(LWORK,)213 +b(IWORK,)15 b(LIWORK,)f(INFO)h(\))166 1109 y(CHEGVD\()g(ITYPE,)g(JOBZ,) +f(UPLO,)160 b(N,)14 b(A,)f(LDA,)i(B,)e(LDB,)94 b(W,)583 +b(WORK,)14 b(LWORK,)h(RWORK,)f(LRWORK,)h(IWORK,)g(LIWORK,)f(INFO)h(\)) +-205 1167 y Fd(Symmetric-de\014nite)116 b Fc(SSPGV\()28 +b(ITYPE,)15 b(JOBZ,)f(UPLO,)160 b(N,)14 b(AP,)67 b(BP,)146 +b(W,)265 b(Z,)14 b(LDZ,)226 b(WORK,)503 b(INFO)15 b(\))-205 +1196 y Fd(\(P)o(ac)o(k)o(ed)c(Storage\))143 b Fc(CHPGV\()28 +b(ITYPE,)15 b(JOBZ,)f(UPLO,)160 b(N,)14 b(AP,)67 b(BP,)146 +b(W,)265 b(Z,)14 b(LDZ,)226 b(WORK,)107 b(RWORK,)318 +b(INFO)15 b(\))-205 1225 y Fd(Eigen)o(v)n(alues/v)o(ectors)-205 +1254 y(Divide)d(and)e(Conquer)101 b Fc(SSPGVD\()15 b(ITYPE,)g(JOBZ,)f +(UPLO,)160 b(N,)14 b(AP,)67 b(BP,)146 b(W,)265 b(Z,)14 +b(LDZ,)226 b(WORK,)14 b(LWORK,)213 b(IWORK,)15 b(LIWORK,)f(INFO)h(\)) +166 1283 y(CHPGVD\()g(ITYPE,)g(JOBZ,)f(UPLO,)160 b(N,)14 +b(AP,)67 b(BP,)146 b(W,)265 b(Z,)14 b(LDZ,)226 b(WORK,)14 +b(LWORK,)h(RWORK,)f(LRWORK,)h(IWORK,)g(LIWORK,)f(INFO)h(\))-205 +1342 y Fd(Symmetric-de\014nite)116 b Fc(SSBGV\()121 b(JOBZ,)14 +b(UPLO,)160 b(N,)14 b(KA,)g(KB,)g(AB,)g(LDAB,)g(BB,)g(LDBB,)27 +b(W,)173 b(Z,)14 b(LDZ,)226 b(WORK,)503 b(INFO)15 b(\))-205 +1371 y Fd(\(Band)c(Storage\))166 b Fc(CHBGV\()121 b(JOBZ,)14 +b(UPLO,)160 b(N,)14 b(KA,)g(KB,)g(AB,)g(LDAB,)g(BB,)g(LDBB,)27 +b(W,)173 b(Z,)14 b(LDZ,)226 b(WORK,)107 b(RWORK,)318 +b(INFO)15 b(\))-205 1400 y Fd(Eigen)o(v)n(alues/v)o(ectors)-205 +1429 y(Divide)d(and)e(Conquer)101 b Fc(SSBGVD\()108 b(JOBZ,)14 +b(UPLO,)160 b(N,)14 
b(KA,)g(KB,)g(AB,)g(LDAB,)g(BB,)g(LDBB,)27 +b(W,)173 b(Z,)14 b(LDZ,)226 b(WORK,)14 b(LWORK,)213 b(IWORK,)15 +b(LIWORK,)f(INFO)h(\))166 1458 y(CHBGVD\()108 b(JOBZ,)14 +b(UPLO,)160 b(N,)14 b(KA,)g(KB,)g(AB,)g(LDAB,)g(BB,)g(LDBB,)27 +b(W,)173 b(Z,)14 b(LDZ,)226 b(WORK,)14 b(LWORK,)h(RWORK,)f(LRWORK,)h +(IWORK,)g(LIWORK,)f(INFO)h(\))-205 1516 y Fd(General)268 +b Fc(SGGES\()15 b(JOBVSL,)g(JOBVSR,)g(SORT,)f(SELCTG,)h(N,)f(A,)f(LDA,) +28 b(B,)13 b(LDB,)h(SDIM,)h(ALPHAR,)g(ALPHAI,)g(BETA,)f(VSL,)g(LDVSL,)h +(VSR,)f(LDVSR,)h(WORK,)f(LWORK,)213 b(BWORK,)120 b(INFO)15 +b(\))-205 1545 y Fd(Sc)o(h)o(ur)10 b(F)n(actorization)111 +b Fc(CGGES\()15 b(JOBVSL,)g(JOBVSR,)g(SORT,)f(SELCTG,)h(N,)f(A,)f(LDA,) +28 b(B,)13 b(LDB,)h(SDIM,)h(ALPHA,)134 b(BETA,)14 b(VSL,)g(LDVSL,)h +(VSR,)f(LDVSR,)h(WORK,)f(LWORK,)h(RWORK,)120 b(BWORK,)g(INFO)15 +b(\))-205 1603 y Fd(General)268 b Fc(SGGEV\()28 b(JOBVL,)g(JOBVR,)200 +b(N,)14 b(A,)f(LDA,)28 b(B,)13 b(LDB,)81 b(ALPHAR,)14 +b(ALPHAI,)h(BETA,)g(VL,)f(LDVL,)g(VR,)g(LDVR,)81 b(WORK,)14 +b(LWORK,)411 b(INFO)15 b(\))-205 1632 y Fd(Eigen)o(v)n(alues/v)o +(ectors)109 b Fc(CGGEV\()28 b(JOBVL,)g(JOBVR,)200 b(N,)14 +b(A,)f(LDA,)28 b(B,)13 b(LDB,)81 b(ALPHA,)133 b(BETA,)15 +b(VL,)f(LDVL,)g(VR,)g(LDVR,)81 b(WORK,)14 b(LWORK,)h(RWORK,)318 +b(INFO)15 b(\))-205 1690 y Fd(General)268 b Fc(SGGSVD\()15 +b(JOBU,)f(JOBV,)h(JOBQ,)f(M,)g(N,)g(P,)f(K,)h(L,)g(A,)f(LDA,)i(B,)e +(LDB,)81 b(ALPHA,)133 b(BETA,)15 b(U,)e(LDU,)i(V,)e(LDV,)h(Q,)g(LDQ,)28 +b(WORK,)305 b(IWORK,)120 b(INFO)15 b(\))-205 1719 y Fd(Singular)d(V)n +(alues/V)n(ectors)50 b Fc(CGGSVD\()15 b(JOBU,)f(JOBV,)h(JOBQ,)f(M,)g +(N,)g(P,)f(K,)h(L,)g(A,)f(LDA,)i(B,)e(LDB,)81 b(ALPHA,)133 +b(BETA,)15 b(U,)e(LDU,)i(V,)e(LDV,)h(Q,)g(LDQ,)28 b(WORK,)107 +b(RWORK,)120 b(IWORK,)g(INFO)15 b(\))p eop +%%Page: 3 3 +3 2 bop -229 -101 a Ff(Exp)r(ert)23 b(Driv)n(ers)581 +-32 y Fe(Exp)q(ert)13 b(Driv)o(er)g(Routines)g(for)i(Linear)e +(Equations)-205 56 y Fd(Matrix)f(T)o(yp)q(e)338 b(Routine)p +-229 67 2425 2 v -205 87 
a(General)406 b Fc(SGESVX\()15 +b(FACT,)f(TRANS,)h(N,)120 b(NRHS,)14 b(A,)g(LDA,)53 b(AF,)14 +b(LDAF,)41 b(IPIV,)15 b(EQUED,)f(R,)g(C,)g(B,)f(LDB,)i(X,)e(LDX,)h +(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 b(IWORK,)14 b(INFO)h(\))304 +116 y(CGESVX\()g(FACT,)f(TRANS,)h(N,)120 b(NRHS,)14 b(A,)g(LDA,)53 +b(AF,)14 b(LDAF,)41 b(IPIV,)15 b(EQUED,)f(R,)g(C,)g(B,)f(LDB,)i(X,)e +(LDX,)h(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 b(RWORK,)14 +b(INFO)h(\))-205 174 y Fd(General)c(Band)325 b Fc(SGBSVX\()15 +b(FACT,)f(TRANS,)h(N,)f(KL,)g(KU,)g(NRHS,)g(AB,)g(LDAB,)27 +b(AFB,)15 b(LDAFB,)f(IPIV,)h(EQUED,)f(R,)g(C,)g(B,)f(LDB,)i(X,)e(LDX,)h +(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 b(IWORK,)14 b(INFO)h(\))304 +203 y(CGBSVX\()g(FACT,)f(TRANS,)h(N,)f(KL,)g(KU,)g(NRHS,)g(AB,)g(LDAB,) +27 b(AFB,)15 b(LDAFB,)f(IPIV,)h(EQUED,)f(R,)g(C,)g(B,)f(LDB,)i(X,)e +(LDX,)h(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 b(RWORK,)14 +b(INFO)h(\))-205 261 y Fd(General)c(T)n(ridiagonal)245 +b Fc(SGTSVX\()15 b(FACT,)f(TRANS,)h(N,)120 b(NRHS,)14 +b(DL,)g(D,)g(DU,)f(DLF,)i(DF,)f(DUF,)g(DU2,)g(IPIV,)94 +b(B,)13 b(LDB,)i(X,)e(LDX,)h(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 +b(IWORK,)14 b(INFO)h(\))304 290 y(CGTSVX\()g(FACT,)f(TRANS,)h(N,)120 +b(NRHS,)14 b(DL,)g(D,)g(DU,)f(DLF,)i(DF,)f(DUF,)g(DU2,)g(IPIV,)94 +b(B,)13 b(LDB,)i(X,)e(LDX,)h(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 +b(RWORK,)14 b(INFO)h(\))-205 348 y Fd(Symmetric/Hermitian)215 +b Fc(SPOSVX\()15 b(FACT,)f(UPLO,)28 b(N,)120 b(NRHS,)14 +b(A,)g(LDA,)53 b(AF,)14 b(LDAF,)121 b(EQUED,)14 b(S,)54 +b(B,)13 b(LDB,)i(X,)e(LDX,)h(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 +b(IWORK,)14 b(INFO)h(\))-205 378 y Fd(P)o(ositiv)o(e)e(De\014nite)287 +b Fc(CPOSVX\()15 b(FACT,)f(UPLO,)28 b(N,)120 b(NRHS,)14 +b(A,)g(LDA,)53 b(AF,)14 b(LDAF,)121 b(EQUED,)14 b(S,)54 +b(B,)13 b(LDB,)i(X,)e(LDX,)h(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 +b(RWORK,)14 b(INFO)h(\))-205 436 y Fd(Symmetric/Hermitian)215 +b Fc(SPPSVX\()15 b(FACT,)f(UPLO,)28 b(N,)120 b(NRHS,)14 +b(AP,)106 b(AFP,)187 b(EQUED,)14 b(S,)54 b(B,)13 b(LDB,)i(X,)e(LDX,)h 
+(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 b(IWORK,)14 b(INFO)h(\))-205 +465 y Fd(P)o(ositiv)o(e)e(De\014nite)d(\(P)o(ac)o(k)o(ed)g(Storage\))50 +b Fc(CPPSVX\()15 b(FACT,)f(UPLO,)28 b(N,)120 b(NRHS,)14 +b(AP,)106 b(AFP,)187 b(EQUED,)14 b(S,)54 b(B,)13 b(LDB,)i(X,)e(LDX,)h +(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 b(RWORK,)14 b(INFO)h(\))-205 +523 y Fd(Symmetric/Hermitian)215 b Fc(SPBSVX\()15 b(FACT,)f(UPLO,)28 +b(N,)14 b(KD,)67 b(NRHS,)14 b(AB,)g(LDAB,)27 b(AFB,)15 +b(LDAFB,)94 b(EQUED,)14 b(S,)54 b(B,)13 b(LDB,)i(X,)e(LDX,)h(RCOND,)h +(FERR,)f(BERR,)h(WORK,)107 b(IWORK,)14 b(INFO)h(\))-205 +552 y Fd(P)o(ositiv)o(e)e(De\014nite)d(Band)207 b Fc(CPBSVX\()15 +b(FACT,)f(UPLO,)28 b(N,)14 b(KD,)67 b(NRHS,)14 b(AB,)g(LDAB,)27 +b(AFB,)15 b(LDAFB,)94 b(EQUED,)14 b(S,)54 b(B,)13 b(LDB,)i(X,)e(LDX,)h +(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 b(RWORK,)14 b(INFO)h(\))-205 +610 y Fd(Symmetric/Hermitian)215 b Fc(SPTSVX\()15 b(FACT,)107 +b(N,)120 b(NRHS,)14 b(D,)g(E,)79 b(DF,)14 b(EF,)319 b(B,)13 +b(LDB,)i(X,)e(LDX,)h(RCOND,)h(FERR,)f(BERR,)h(WORK,)199 +b(INFO)15 b(\))-205 639 y Fd(P)o(ositiv)o(e)e(De\014nite)d(T)n +(ridiagonal)127 b Fc(CPTSVX\()15 b(FACT,)107 b(N,)120 +b(NRHS,)14 b(D,)g(E,)79 b(DF,)14 b(EF,)319 b(B,)13 b(LDB,)i(X,)e(LDX,)h +(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 b(RWORK,)14 b(INFO)h(\))-205 +697 y Fd(Symmetric/Hermitian)215 b Fc(SSYSVX\()15 b(FACT,)f(UPLO,)28 +b(N,)120 b(NRHS,)14 b(A,)g(LDA,)53 b(AF,)14 b(LDAF,)h(IPIV,)213 +b(B,)13 b(LDB,)i(X,)e(LDX,)h(RCOND,)h(FERR,)f(BERR,)h(WORK,)f(LWORK,)h +(IWORK,)f(INFO)h(\))-205 726 y Fd(Inde\014nite)381 b +Fc(CSYSVX\()15 b(FACT,)f(UPLO,)28 b(N,)120 b(NRHS,)14 +b(A,)g(LDA,)53 b(AF,)14 b(LDAF,)h(IPIV,)213 b(B,)13 b(LDB,)i(X,)e(LDX,) +h(RCOND,)h(FERR,)f(BERR,)h(WORK,)f(LWORK,)h(RWORK,)f(INFO)h(\))304 +755 y(CHESVX\()g(FACT,)f(UPLO,)28 b(N,)120 b(NRHS,)14 +b(A,)g(LDA,)53 b(AF,)14 b(LDAF,)h(IPIV,)213 b(B,)13 b(LDB,)i(X,)e(LDX,) +h(RCOND,)h(FERR,)f(BERR,)h(WORK,)f(LWORK,)h(RWORK,)f(INFO)h(\))-205 +813 y Fd(Symmetric/Hermitian)215 b Fc(SSPSVX\()15 
b(FACT,)f(UPLO,)28 +b(N,)120 b(NRHS,)14 b(AP,)106 b(AFP,)15 b(IPIV,)279 b(B,)13 +b(LDB,)i(X,)e(LDX,)h(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 +b(IWORK,)14 b(INFO)h(\))-205 842 y Fd(Inde\014nite)10 +b(\(P)o(ac)o(k)o(ed)g(Storage\))144 b Fc(CSPSVX\()15 +b(FACT,)f(UPLO,)28 b(N,)120 b(NRHS,)14 b(AP,)106 b(AFP,)15 +b(IPIV,)279 b(B,)13 b(LDB,)i(X,)e(LDX,)h(RCOND,)h(FERR,)f(BERR,)h +(WORK,)107 b(RWORK,)14 b(INFO)h(\))304 871 y(CHPSVX\()g(FACT,)f(UPLO,) +28 b(N,)120 b(NRHS,)14 b(AP,)106 b(AFP,)15 b(IPIV,)279 +b(B,)13 b(LDB,)i(X,)e(LDX,)h(RCOND,)h(FERR,)f(BERR,)h(WORK,)107 +b(RWORK,)14 b(INFO)h(\))-145 1026 y Fe(Divide)f(and)f(Conquer)g(and)g +(Exp)q(ert)f(Driv)o(er)i(Routines)f(for)h(Linear)g(Least)f(Squares)f +(Problems)-205 1115 y Fd(Problem)e(T)o(yp)q(e)271 b(Routine)p +-229 1125 1676 2 v -205 1145 a(Solv)o(e)11 b(Using)h(Orthogonal)g(F)n +(actor)49 b Fc(SGELSY\()15 b(M,)f(N,)f(NRHS,)i(A,)e(LDA,)i(B,)e(LDB,)h +(JPVT,)h(RCOND,)f(RANK,)h(WORK,)f(LWORK,)200 b(INFO)14 +b(\))257 1174 y(CGELSY\()h(M,)f(N,)f(NRHS,)i(A,)e(LDA,)i(B,)e(LDB,)h +(JPVT,)h(RCOND,)f(RANK,)h(WORK,)f(LWORK,)h(RWORK,)107 +b(INFO)14 b(\))-205 1232 y Fd(Solv)o(e)d(Using)h(SVD,)225 +b Fc(SGELSS\()15 b(M,)f(N,)f(NRHS,)i(A,)e(LDA,)i(B,)e(LDB,)54 +b(S,)14 b(RCOND,)g(RANK,)h(WORK,)f(LWORK,)200 b(INFO)14 +b(\))-205 1262 y Fd(Allo)o(wing)g(for)c(Rank-De\014ciency)69 +b Fc(CGELSS\()15 b(M,)f(N,)f(NRHS,)i(A,)e(LDA,)i(B,)e(LDB,)54 +b(S,)14 b(RCOND,)g(RANK,)h(WORK,)f(LWORK,)h(RWORK,)107 +b(INFO)14 b(\))-205 1320 y Fd(Solv)o(e)d(Using)h(D&C)e(SVD,)147 +b Fc(SGELSD\()15 b(M,)f(N,)f(NRHS,)i(A,)e(LDA,)i(B,)e(LDB,)54 +b(S,)14 b(RCOND,)g(RANK,)h(WORK,)f(LWORK,)107 b(IWORK,)15 +b(INFO)f(\))-205 1349 y Fd(Allo)o(wing)g(for)c(Rank-De\014ciency)69 +b Fc(CGELSD\()15 b(M,)f(N,)f(NRHS,)i(A,)e(LDA,)i(B,)e(LDB,)54 +b(S,)14 b(RCOND,)g(RANK,)h(WORK,)f(LWORK,)h(RWORK,)f(IWORK,)h(INFO)f +(\))p eop +%%Page: 4 4 +4 3 bop 150 -123 a Fe(Exp)q(ert)12 b(and)h(RRR)i(Driv)o(er)e(Routines)h 
+(for)g(Standard)d(and)h(Generalized)g(Symmetric)f(Eigen)o(v)n(alue)i +(Problems)-205 -35 y Fd(Matrix/Problem)f(T)o(yp)q(e)125 +b(Routine)p -229 -25 2497 2 v -205 -5 a(Symmetric/Hermitian)k +Fc(SSYEVX\()15 b(JOBZ,)f(RANGE,)h(UPLO,)f(N,)67 b(A,)14 +b(LDA,)146 b(VL,)14 b(VU,)g(IL,)g(IU,)g(ABSTOL,)h(M,)e(W,)h(Z,)g(LDZ,) +199 b(WORK,)14 b(LWORK,)214 b(IWORK,)14 b(IFAIL,)h(INFO)f(\))-205 +25 y Fd(Eigen)o(v)n(alues/v)o(ectors)161 b Fc(CHEEVX\()15 +b(JOBZ,)f(RANGE,)h(UPLO,)f(N,)67 b(A,)14 b(LDA,)146 b(VL,)14 +b(VU,)g(IL,)g(IU,)g(ABSTOL,)h(M,)e(W,)h(Z,)g(LDZ,)199 +b(WORK,)14 b(LWORK,)h(RWORK,)121 b(IWORK,)14 b(IFAIL,)h(INFO)f(\))218 +83 y(SSYEVR\()h(JOBZ,)f(RANGE,)h(UPLO,)f(N,)67 b(A,)14 +b(LDA,)146 b(VL,)14 b(VU,)g(IL,)g(IU,)g(ABSTOL,)h(M,)e(W,)h(Z,)g(LDZ,) +93 b(ISUPPZ,)15 b(WORK,)f(LWORK,)214 b(IWORK,)14 b(LIWORK,)h(INFO)f(\)) +218 112 y(CHEEVR\()h(JOBZ,)f(RANGE,)h(UPLO,)f(N,)67 b(A,)14 +b(LDA,)146 b(VL,)14 b(VU,)g(IL,)g(IU,)g(ABSTOL,)h(M,)e(W,)h(Z,)g(LDZ,) +93 b(ISUPPZ,)15 b(WORK,)f(LWORK,)h(RWORK,)g(LRWORK,)g(IWORK,)f(LIWORK,) +h(INFO)f(\))218 170 y(SSYGVX\()h(ITYPE,)f(JOBZ,)h(RANGE,)f(UPLO,)h(N,) +66 b(A,)14 b(LDA,)g(B,)g(LDB,)41 b(VL,)14 b(VU,)f(IL,)h(IU,)g(ABSTOL,)h +(M,)f(W,)g(Z,)f(LDZ,)107 b(WORK,)14 b(LWORK,)214 b(IWORK,)14 +b(IFAIL,)h(INFO)f(\))218 199 y(CHEGVX\()h(ITYPE,)f(JOBZ,)h(RANGE,)f +(UPLO,)h(N,)66 b(A,)14 b(LDA,)g(B,)g(LDB,)41 b(VL,)14 +b(VU,)f(IL,)h(IU,)g(ABSTOL,)h(M,)f(W,)g(Z,)f(LDZ,)107 +b(WORK,)14 b(LWORK,)h(RWORK,)121 b(IWORK,)14 b(IFAIL,)h(INFO)f(\))-205 +257 y Fd(Symmetric/Hermitian)129 b Fc(SSPEVX\()15 b(JOBZ,)f(RANGE,)h +(UPLO,)f(N,)67 b(AP,)199 b(VL,)14 b(VU,)g(IL,)g(IU,)g(ABSTOL,)h(M,)e +(W,)h(Z,)g(LDZ,)199 b(WORK,)306 b(IWORK,)14 b(IFAIL,)h(INFO)f(\))-205 +286 y Fd(\(P)o(ac)o(k)o(ed)d(Storage\))195 b Fc(CHPEVX\()15 +b(JOBZ,)f(RANGE,)h(UPLO,)f(N,)67 b(AP,)199 b(VL,)14 b(VU,)g(IL,)g(IU,)g +(ABSTOL,)h(M,)e(W,)h(Z,)g(LDZ,)199 b(WORK,)107 b(RWORK,)121 +b(IWORK,)14 b(IFAIL,)h(INFO)f(\))-205 315 y Fd(Eigen)o(v)n(alues/v)o +(ectors)218 344 y 
Fc(SSPGVX\()h(ITYPE,)f(JOBZ,)h(RANGE,)f(UPLO,)h(N,)66 +b(AP,)14 b(BP,)147 b(VL,)14 b(VU,)f(IL,)h(IU,)g(ABSTOL,)h(M,)f(W,)g(Z,) +f(LDZ,)107 b(WORK,)306 b(IWORK,)14 b(IFAIL,)h(INFO)f(\))218 +373 y(CHPGVX\()h(ITYPE,)f(JOBZ,)h(RANGE,)f(UPLO,)h(N,)66 +b(AP,)14 b(BP,)147 b(VL,)14 b(VU,)f(IL,)h(IU,)g(ABSTOL,)h(M,)f(W,)g(Z,) +f(LDZ,)107 b(WORK,)g(RWORK,)121 b(IWORK,)14 b(IFAIL,)h(INFO)f(\))-205 +431 y Fd(Symmetric/Hermitian)9 b(Band)50 b Fc(SSBEVX\()15 +b(JOBZ,)f(RANGE,)h(UPLO,)f(N,)g(KD,)g(AB,)g(LDAB,)g(Q,)g(LDQ,)g(VL,)g +(VU,)g(IL,)g(IU,)g(ABSTOL,)h(M,)e(W,)h(Z,)g(LDZ,)199 +b(WORK,)306 b(IWORK,)14 b(IFAIL,)h(INFO)f(\))-205 460 +y Fd(Eigen)o(v)n(alues/v)o(ectors)161 b Fc(CHBEVX\()15 +b(JOBZ,)f(RANGE,)h(UPLO,)f(N,)g(KD,)g(AB,)g(LDAB,)g(Q,)g(LDQ,)g(VL,)g +(VU,)g(IL,)g(IU,)g(ABSTOL,)h(M,)e(W,)h(Z,)g(LDZ,)199 +b(WORK,)107 b(RWORK,)121 b(IWORK,)14 b(IFAIL,)h(INFO)f(\))218 +519 y(SSBGVX\()h(JOBZ,)f(RANGE,)h(UPLO,)f(N,)g(KA,)g(KB,)g(AB,)g(LDAB,) +g(BB,)g(LDBB,)g(Q,)g(LDQ,)g(VL,)g(VU,)g(IL,)g(IU,)g(ABSTOL,)h(M,)e(W,)h +(Z,)g(LDZ,)g(WORK,)306 b(IWORK,)14 b(IFAIL,)h(INFO)f(\))218 +548 y(CHBGVX\()h(JOBZ,)f(RANGE,)h(UPLO,)f(N,)g(KA,)g(KB,)g(AB,)g(LDAB,) +g(BB,)g(LDBB,)g(Q,)g(LDQ,)g(VL,)g(VU,)g(IL,)g(IU,)g(ABSTOL,)h(M,)e(W,)h +(Z,)g(LDZ,)g(WORK,)107 b(RWORK,)121 b(IWORK,)14 b(IFAIL,)h(INFO)f(\)) +-205 606 y Fd(Symmetric)8 b(T)n(ridiagonal)120 b Fc(SSTEVX\()15 +b(JOBZ,)f(RANGE,)94 b(N,)67 b(D,)14 b(E,)172 b(VL,)14 +b(VU,)g(IL,)g(IU,)g(ABSTOL,)h(M,)e(W,)h(Z,)g(LDZ,)199 +b(WORK,)306 b(IWORK,)14 b(IFAIL,)h(INFO)f(\))-205 635 +y Fd(Eigen)o(v)n(alues/v)o(ectors)218 664 y Fc(SSTEVR\()h(JOBZ,)f +(RANGE,)94 b(N,)67 b(D,)14 b(E,)172 b(VL,)14 b(VU,)g(IL,)g(IU,)g +(ABSTOL,)h(M,)e(W,)h(Z,)g(LDZ,)93 b(ISUPPZ,)15 b(WORK,)f(LWORK,)214 +b(IWORK,)14 b(LIWORK,)h(INFO)f(\))387 819 y Fe(Exp)q(ert)f(Driv)o(er)h +(Routines)f(for)h(Standard)d(and)i(Generalized)f(Nonsymmetric)f(Eigen)o +(v)n(alue)h(Problems)-205 907 y Fd(Problem)e(T)o(yp)q(e)49 +b(Routine)p -229 917 2857 2 v -205 938 a(Sc)o(h)o(ur)165 +b 
Fc(SGEESX\()15 b(JOBVS,)g(SORT,)f(SELECT,)h(SENSE,)28 +b(N,)14 b(A,)f(LDA,)i(SDIM,)27 b(WR,)14 b(WI,)g(VS,)g(LDVS,)464 +b(RCONDE,)15 b(RCONDV,)319 b(WORK,)15 b(LWORK,)107 b(IWORK,)15 +b(LIWORK,)f(BWORK,)h(INFO)f(\))-205 967 y Fd(F)n(actorization)65 +b Fc(CGEESX\()15 b(JOBVS,)g(SORT,)f(SELECT,)h(SENSE,)28 +b(N,)14 b(A,)f(LDA,)i(SDIM,)27 b(W,)80 b(VS,)14 b(LDVS,)464 +b(RCONDE,)15 b(RCONDV,)319 b(WORK,)15 b(LWORK,)f(RWORK,)213 +b(BWORK,)15 b(INFO)f(\))35 1025 y(SGGESX\()h(JOBVSL,)g(JOBVSR,)g(SORT,) +f(SELCTG,)h(SENSE,)28 b(N,)14 b(A,)g(LDA,)g(B,)f(LDB,)i(SDIM,)f +(ALPHAR,)h(ALPHAI,)g(BETA,)f(VSL,)g(LDVSL,)h(VSR,)f(LDVSR,)h(RCONDE,)f +(RCONDV,)187 b(WORK,)15 b(LWORK,)107 b(IWORK,)15 b(LIWORK,)f(BWORK,)h +(INFO)f(\))35 1054 y(CGGESX\()h(JOBVSL,)g(JOBVSR,)g(SORT,)f(SELCTG,)h +(SENSE,)28 b(N,)14 b(A,)g(LDA,)g(B,)f(LDB,)i(SDIM,)f(ALPHAR,)h(ALPHAI,) +g(BETA,)f(VSL,)g(LDVSL,)h(VSR,)f(LDVSR,)h(RCONDE,)f(RCONDV,)187 +b(WORK,)15 b(LWORK,)f(RWORK,)h(IWORK,)g(LIWORK,)f(BWORK,)h(INFO)f(\)) +-205 1112 y Fd(Eigen)o(v)n(alues/)72 b Fc(SGEEVX\()15 +b(BALANC,)g(JOBVL,)g(JOBVR,)f(SENSE,)h(N,)f(A,)f(LDA,)107 +b(WR,)14 b(WI,)g(VL,)g(LDVL,)g(VR,)g(LDVR,)h(ILO,)f(IHI,)g(SCALE,)h +(ABNRM,)f(RCONDE,)h(RCONDV,)425 b(WORK,)15 b(LWORK,)107 +b(IWORK,)g(INFO)14 b(\))-205 1141 y Fd(v)o(ectors)146 +b Fc(CGEEVX\()15 b(BALANC,)g(JOBVL,)g(JOBVR,)f(SENSE,)h(N,)f(A,)f(LDA,) +107 b(W,)80 b(VL,)14 b(LDVL,)g(VR,)g(LDVR,)h(ILO,)f(IHI,)g(SCALE,)h +(ABNRM,)f(RCONDE,)h(RCONDV,)425 b(WORK,)15 b(LWORK,)f(RWORK,)200 +b(INFO)14 b(\))35 1199 y(SGGEVX\()h(BALANC,)g(JOBVL,)g(JOBVR,)f(SENSE,) +h(N,)f(A,)f(LDA,)i(B,)e(LDB,)h(ALPHAR,)h(ALPHAI,)g(BETA,)g(VL,)e(LDVL,) +i(VR,)f(LDVR,)g(ILO,)g(IHI,)g(LSCALE,)h(RSCALE,)g(ABNRM,)g(BBNRM,)f +(RCONDE,)h(RCONDV,)g(WORK,)g(LWORK,)107 b(IWORK,)14 b(BWORK,)h(INFO)f +(\))35 1228 y(CGGEVX\()h(BALANC,)g(JOBVL,)g(JOBVR,)f(SENSE,)h(N,)f(A,)f +(LDA,)i(B,)e(LDB,)h(ALPHAR,)h(ALPHAI,)g(BETA,)g(VL,)e(LDVL,)i(VR,)f +(LDVR,)g(ILO,)g(IHI,)g(LSCALE,)h(RSCALE,)g(ABNRM,)g(BBNRM,)f(RCONDE,)h 
+(RCONDV,)g(WORK,)g(LWORK,)f(RWORK,)h(IWORK,)f(BWORK,)h(INFO)f(\))1095 +1373 y Fe(Meaning)g(of)g(pre\014xes)921 1442 y Fb(Routines)9 +b(b)q(eginning)g(with)j(\\S")e(are)h(a)o(v)n(ailable)e(in:)1200 +1512 y Fa(S)17 b(-)h(REAL)1094 1551 y(D)f(-)h(DOUBLE)d(PRECISION)918 +1591 y Fb(Routines)9 b(b)q(eginning)g(with)j(\\C")f(are)g(a)o(v)n +(ailable)e(in:)1173 1660 y Fa(C)18 b(-)f(COMPLEX)1147 +1700 y(Z)g(-)h(COMPLEX*1)o(6)831 1739 y Fb(Note:)d Fa(COMPLEX*16)8 +b Fb(ma)o(y)j(not)g(b)q(e)g(supp)q(orted)d(b)o(y)j(all)g(mac)o(hines)p +eop +%%Trailer +end +userdict /end-hook known{end-hook}if +%%EOF diff --git a/sources b/sources new file mode 100644 index 0000000..50764ac --- /dev/null +++ b/sources @@ -0,0 +1,2 @@ +SHA512 (manpages.tgz) = 75184bbfac90f46e4fbb25b341f00c260904b5f3174084022c53c686a5181bb678fc1ec948f419d33890cb5769a60ca1000964e4bf6cc89438552e8b8fe45f2a +SHA512 (v3.9.0.tar.gz) = 424956ad941a60a4b71e0d451ad48db12a692f8a71a90f3ca7f71d6ecc1922f392746ea84df1c47a46577ed2db32e9e47ec44ad248207c5ac7da179becb712ef