Commit 191e617e authored by zhang

1) Add feature to read virtual sources of coupling for the SOLEIL lattice.
2) Add feature to call COD correction.
3) Add fmap_p( ), fmapdp_p( ), and momacceptance_p( ) to do parallel
   computation of fmap, fmapdp, and momacceptance.
4) Add feature to select parallel or non-parallel computation depending
   on the value of MPI_EXEC.
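Items 3) and 4) rely on a compile-time switch: when the MPI_EXEC macro is set, <mpi.h> is included, MPI is initialized in main(), and the parallel routines fmap_p( ), fmapdp_p( ) and MomentumAcceptance_p( ) are called with the process count and rank; otherwise the original serial routines are compiled. A minimal sketch of that pattern, assuming hypothetical stand-in routines track_region_serial( ) and track_region_parallel( ) in place of the real Tracy calls:

#if MPI_EXEC
#include <mpi.h>        // library for parallel computation
#endif
#include <cstdio>

// Hypothetical stand-ins for the real tracking routines (fmap / fmap_p, etc.).
void track_region_serial() { std::printf("serial tracking of the whole grid\n"); }
void track_region_parallel(int numprocs, int myid) {
  std::printf("rank %d of %d tracks its own sub-region\n", myid, numprocs);
}

int main(int argc, char *argv[]) {
#if MPI_EXEC
  MPI_Init(&argc, &argv);                    // initialize parallel computation
  int myid = 0, numprocs = 0;
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);  // number of processes
  MPI_Comm_rank(MPI_COMM_WORLD, &myid);      // rank of this process
  track_region_parallel(numprocs, myid);     // each rank works on its own region
  MPI_Barrier(MPI_COMM_WORLD);               // wait for all ranks before merging output
  MPI_Finalize();                            // finish parallel computation
#else
  track_region_serial();                     // non-parallel computation
#endif
  return 0;
}

After the barrier, rank 0 merges the per-rank output files into the user-defined output file, as done in the diff below.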
parent 1c95b76c
......@@ -9,7 +9,17 @@ int no_tps = ORDER; // arbitrary TPSA order is defined locally
extern bool freq_map;
#include "tracy_lib.h"
#if HAVE_CONFIG_H
#include <config.h>
#endif
#if MPI_EXEC
// library for parallel computation; must be included before "stdio.h"
#include <mpi.h>
#endif
#include "tracy_lib.h"
//***************************************************************************************
//
......@@ -32,6 +42,7 @@ int main(int argc, char *argv[]) {
/* parameters to read the user input script .prm */
long i=0L; // loop index used to read the command strings
long j=0L; // loop index used to read the files produced by parallel computing
char CommandStr[max_str];
double nux = 0.0, nuy = 0.0, ksix = 0.0, ksiy = 0.0;
bool chroma=true;
......@@ -46,7 +57,10 @@ int main(int argc, char *argv[]) {
const int NCOMMAND = 500;
UserCommand UserCommandFlag[NCOMMAND];
#if MPI_EXEC
//Initialize parallel computation
MPI_Init(&argc, &argv);
#endif
/************************************************************************
read in files and flags
*************************************************************************/
......@@ -126,6 +140,13 @@ int main(int argc, char *argv[]) {
cout << "globval.quad_fringe is " << globval.quad_fringe << "\n";
}
//deactivate quadrupole fringe field
else if(strcmp(CommandStr,"QuadFringeOffFlag") == 0) {
globval.quad_fringe = false;
cout << "\n";
cout << "globval.quad_fringe is " << globval.quad_fringe << "\n";
}
//set RF voltage
else if(strcmp(CommandStr,"RFvoltageFlag") == 0) {
printf("\nSetting RF voltage:\n");
......@@ -141,133 +162,60 @@ int main(int argc, char *argv[]) {
ReadCh(UserCommandFlag[i].chamber_file); /* read vacuum chamber from a file "Apertures.dat" , soleil version*/
PrintCh(); // print chamber into chamber.out
}
// read the misalignment errors to the elements, then do COD correction
// using SVD method.
// read the misalignment errors to the elements
// Based on the function error_and_correction() in nsls-ii_lib_templ.h
// else if(strcmp(ReadaefileFlag, "") != 0){
else if (strcmp(CommandStr,"ReadaefileFlag") == 0) {
// ***** Apply corrections and output flatfile for n_stat sets of random #'s
bool cod = true;
int k, icod=0;
FILE *hOrbitFile, *vOrbitFile ;
int hcorrIdx[nCOR], vcorrIdx[nCOR]; //list of corr for orbit correction
//initialize the corrector list
for ( k = 0; k < nCOR; k++){
hcorrIdx[k] = -1;
vcorrIdx[k] = -1;
}
//Get response matrix between bpm and correctors, and then print the SVD setting to the files
if (n_orbit!=0 || n_scale!=0) {
// select correctors to be used
readCorrectorList(hcorr_file, vcorr_file, hcorrIdx, vcorrIdx);
fprintf(stdout, "\n\nSVD correction setting:\n");
fprintf(stdout, "H-plane %d singular values:\n", nwh);
fprintf(stdout, "V-plane %d singular values:\n\n",nwv);
// compute beam response matrix
printf("\n");
printf("Computing beam response matrix\n");
//get the response matrix between bpm and correctors
gcmats(globval.bpm, globval.hcorr, 1, hcorrIdx);
gcmats(globval.bpm, globval.vcorr, 2, vcorrIdx);
/* gcmat(globval.bpm, globval.hcorr, 1);
gcmat(globval.bpm, globval.vcorr, 2);*/
// print response matrices to files 'svdh.out' and file 'svdv.out'
prt_gcmat(globval.bpm, globval.hcorr, 1);
prt_gcmat(globval.bpm, globval.vcorr, 2);
}
//print the statistics of orbit in file 'OrbScanFile.out'
OrbScanFile = file_write(OrbScanFileName);
//write files with orbits at all element locations
hOrbitFile = file_write(hOrbitFileName);
vOrbitFile = file_write(vOrbitFileName);
fprintf(hOrbitFile, "# First line: s-location (m) \n");
fprintf(hOrbitFile, "# After orbit correction: Horizontal closed orbit at all element locations (with %3d BPMs) at different loop\n", GetnKid(globval.bpm));
fprintf(vOrbitFile, "# First line s-location (m) \n");
fprintf(vOrbitFile, "# After orbit correction: Vertical closed orbit at all element locations (with %3d BPMs) at different loop\n", GetnKid(globval.bpm));
for (k = 0; k < globval.Cell_nLoc; k++){
fprintf(hOrbitFile, "% 9.3e ", Cell[k].S);
fprintf(vOrbitFile, "% 9.3e ", Cell[k].S);
} // end for
fprintf(hOrbitFile, "\n");
fprintf(vOrbitFile, "\n");
// load misalignments
cout << "Read misalignment errors from file: " << UserCommandFlag[i].ae_file << endl;
LoadAlignTol(UserCommandFlag[i].ae_file, true, 1.0, true, 0);
Ring_GetTwiss(true, 0.0);
}
//do COD correction using SVD method.
else if(strcmp(CommandStr,"CODCorrectFlag") == 0) {
cout << "Begin Closed Orbit correction: " << endl;
for (k = 1; k <= n_stat; k++) {// loop for n_stat sets of errors
if (n_orbit!=0 || n_scale!=0) {
cod =
CorrectCOD_Ns(hOrbitFile, vOrbitFile, UserCommandFlag[i].ae_file,
n_orbit,n_scale,k,nwh,nwv, hcorrIdx, vcorrIdx);
/* cod = CorrectCOD_N(ae_file, n_orbit, n_scale, k);*/
printf("\n");
if (cod){
/* printf("done with orbit correction, now do coupling",
" correction plus vert. disp\n");*/
if(n_orbit == 0)
printf("iter # %3d n_obit = 0, no orbit correction \n",k);
else
printf("iter # %3d Orbit correction succeeded\n", k);
}
else
if(!cod){
icod = icod + 1;
fprintf(stdout, "!!! iter # %3d error_and_correction\n",k);
}
//chk_cod(cod, "iter # %3d error_and_correction");
}
// Ring_GetTwiss(chroma, dp);
Ring_GetTwiss(true, 0.0);
// for debugging
//print flat lattice
//sprintf(mfile_name, "flat_file.%03d.dat",k);
//prtmfile(mfile_name);
}
if(n_orbit < 1){
cout << "interation number n_obit should NOT small than 1!!!" << endl;
exit_(1);
}
fprintf(stdout, "Number of unstable orbits %d/%d", icod, n_stat);
prt_cod("corr_after.out", globval.bpm, true);
// close file giving orbit at BPM location
fclose(hOrbitFile);
fclose(vOrbitFile);
fclose(OrbScanFile);
CODCorrect(hcorr_file,vcorr_file, n_orbit,nwh,nwv);
Ring_GetTwiss(true, 0.0);
prt_cod("summary_miserr_codcorr.out", globval.bpm, true);
}
// set the field error into the lattice
// The corresponding field error is replaced by the new value.
// This feature is generic, works for all lattices
else if (strcmp(CommandStr,"ReadfefileFlag") == 0) {
fprintf(stdout,"\n Read field error from fe_file: %s \n", UserCommandFlag[i].fe_file);
else if (strcmp(CommandStr,"ReadfefileFlag") == 0) {
fprintf(stdout,"\n Read multipole field errors from file: %s \n", UserCommandFlag[i].fe_file);
LoadFieldErrs(UserCommandFlag[i].fe_file, true, 1.0, true, 1);
Ring_GetTwiss(true, 0.0);
prtmfile("flat_file_fefile.dat");
}
// read multipole errors from a file; specific for soleil lattice
else if(strcmp(CommandStr,"ReadMultipoleFlag") == 0) {
fprintf(stdout,"\n Read Multipoles file for lattice with thick sextupoles, specific for SOLEIL lattice: \n");
fprintf(stdout,"\n Read Multipoles field error for SOLEIL lattice with thick sextupoles, from file:\n");
fprintf(stdout," %s\n",multipole_file);
ReadFieldErr(multipole_file);
//first print the full lattice with error as a flat file
prtmfile("flat_file_errmultipole.dat"); // writes flat file with all element errors /* very important file for debug*/
Ring_GetTwiss(chroma = true, 0.0); /* Compute and get Twiss parameters */
printglob();
}
// read the sources of coupling from a file; for soleil lattice
else if(strcmp(CommandStr,"ReadVirtualSkewquadFlag") == 0) {
fprintf(stdout,"\n Read virtual skew quadrupole setting of SOLEIL lattice from file:\n");
fprintf(stdout," %s \n",virtualskewquad_file);
ReadVirtualSkewQuad(virtualskewquad_file);
//first print the full lattice with sources of coupling as a flat file
prtmfile("flat_file_skewquad.dat"); /* very important file for debug*/
//get the coupling
Coupling_Edwards_Teng();
}
//print the Twiss parameters into a user-defined file
else if(strcmp(CommandStr,"PrintTwissFlag") == 0) {
......@@ -431,8 +379,8 @@ int main(int argc, char *argv[]) {
printlatt("linlat_coupling.out"); /* dump linear lattice functions into "linlat.dat" */
// GetEmittance(ElemIndex("cav"), true); //only for test
Coupling_Edwards_Teng();
Ring_GetTwiss(chroma = true, 0.0); /* Compute and get Twiss parameters */
printglob(); /* print parameter list */
// Ring_GetTwiss(chroma = true, 0.0); /* Compute and get Twiss parameters */
// printglob(); /* print parameter list */
}
// add coupling by random rotating of the full quadrupole magnets
......@@ -492,27 +440,162 @@ int main(int argc, char *argv[]) {
// Computes FMA
else if(strcmp(CommandStr,"FmapFlag") == 0) {
printf("\n begin Fmap calculation for on momentum particles: \n");
fmap(UserCommandFlag[i]._FmapFlag_fmap_file,
UserCommandFlag[i]._FmapFlag_nxpoint,
UserCommandFlag[i]._FmapFlag_nypoint,
UserCommandFlag[i]._FmapFlag_nturn,
UserCommandFlag[i]._FmapFlag_xmax,
UserCommandFlag[i]._FmapFlag_ymax,
UserCommandFlag[i]._FmapFlag_delta,
UserCommandFlag[i]._FmapFlag_diffusion);
#if MPI_EXEC
//initialization for parallel computing
int myid=0, numprocs=0;
int namelen=0;
char processor_name[MPI_MAX_PROCESSOR_NAME]="";
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
MPI_Get_processor_name(processor_name, &namelen);
printf("\n Beginning parallel computing: %d of %d is on %s\n", myid, numprocs, processor_name);
printf("\n begin Fmap calculation for on momentum particles: \n");
//Each process, identified by myid, tracks a different region of the fmap
fmap_p(UserCommandFlag[i]._FmapFlag_fmap_file,
UserCommandFlag[i]._FmapFlag_nxpoint,
UserCommandFlag[i]._FmapFlag_nypoint,
UserCommandFlag[i]._FmapFlag_nturn,
UserCommandFlag[i]._FmapFlag_xmax,
UserCommandFlag[i]._FmapFlag_ymax,
UserCommandFlag[i]._FmapFlag_delta,
UserCommandFlag[i]._FmapFlag_diffusion,
numprocs,myid);
//Synchronize all processes: wait until every process has finished its region of the fmap before proceeding.
MPI_Barrier(MPI_COMM_WORLD);
//Collect the data from the files written by each process and merge them into the user-defined fmap file
if(myid==0)
{
// system("cp 0fmap.out fmap_p.out");
FILE *outf;
if ((outf = fopen(UserCommandFlag[i]._FmapFlag_fmap_file, "w")) == NULL)
{
fprintf(stdout, "psoltracy: error while opening file %s\n", UserCommandFlag[i]._FmapFlag_fmap_file);
exit_(1);
}
FILE *fp;
char buffer[81];
char FmapFile[50];
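// Each rank writes its partial results to "<rank><fmap_file>", e.g. "0fmap.out" for rank 0;
// those per-rank files are concatenated here in rank order.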
for(int j=0; j<numprocs; j++)
{
FmapFile[0]='\0';
sprintf(FmapFile,"%d",j);
strcat(FmapFile,UserCommandFlag[i]._FmapFlag_fmap_file);
if((fp = fopen(FmapFile, "r")) == NULL)
{
fprintf(stdout, "%s: error while opening file.\n", FmapFile);
exit_(1);
}
while(fgets(buffer, 80, fp) != NULL)
{
fputs(buffer, outf);
buffer[0]='\0';
}
fclose(fp);
}
fclose(outf);
}
MPI_Barrier(MPI_COMM_WORLD);
printf("Process %d finished for on momentum fmap.\n", myid);
#else
fmap(UserCommandFlag[i]._FmapFlag_fmap_file,
UserCommandFlag[i]._FmapFlag_nxpoint,
UserCommandFlag[i]._FmapFlag_nypoint,
UserCommandFlag[i]._FmapFlag_nturn,
UserCommandFlag[i]._FmapFlag_xmax,
UserCommandFlag[i]._FmapFlag_ymax,
UserCommandFlag[i]._FmapFlag_delta,
UserCommandFlag[i]._FmapFlag_diffusion);
#endif
}
// Compute FMA dp
else if(strcmp(CommandStr,"FmapdpFlag") == 0) {
printf("\n begin Fmap calculation for off momentum particles: \n");
fmapdp(UserCommandFlag[i]._FmapdpFlag_fmapdp_file,
UserCommandFlag[i]._FmapdpFlag_nxpoint,
UserCommandFlag[i]._FmapdpFlag_nepoint,
UserCommandFlag[i]._FmapdpFlag_nturn,
UserCommandFlag[i]._FmapdpFlag_xmax,
UserCommandFlag[i]._FmapdpFlag_emax,
UserCommandFlag[i]._FmapdpFlag_z,
UserCommandFlag[i]._FmapdpFlag_diffusion);
#if MPI_EXEC
//initialization for parallel computing
int myid=0, numprocs=0;
int namelen=0;
char processor_name[MPI_MAX_PROCESSOR_NAME]="";
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
MPI_Get_processor_name(processor_name, &namelen);
printf("Beginning parallel computing: %d of %d is on %s\n", myid, numprocs, processor_name);
printf("\n begin Fmap calculation for off momentum particles: \n");
fmapdp_p(UserCommandFlag[i]._FmapdpFlag_fmapdp_file,
UserCommandFlag[i]._FmapdpFlag_nxpoint,
UserCommandFlag[i]._FmapdpFlag_nepoint,
UserCommandFlag[i]._FmapdpFlag_nturn,
UserCommandFlag[i]._FmapdpFlag_xmax,
UserCommandFlag[i]._FmapdpFlag_emax,
UserCommandFlag[i]._FmapdpFlag_z,
UserCommandFlag[i]._FmapdpFlag_diffusion,
numprocs,myid);
//Synchronize all processes: wait until every process has finished its region of the fmap before proceeding.
MPI_Barrier(MPI_COMM_WORLD);
//Collect the data from the files written by each process and merge them into the user-defined fmapdp file
if(myid==0)
{
// system("cp 0fmap.out fmap_p.out");
FILE *outf;
if ((outf = fopen(UserCommandFlag[i]._FmapdpFlag_fmapdp_file, "w")) == NULL)
{
fprintf(stdout, "psoltracy: error while opening file %s\n", UserCommandFlag[i]._FmapdpFlag_fmapdp_file);
exit_(1);
}
FILE *fp;
char buffer[81];
char FmapFile[50];
for(int j=0; j<numprocs; j++)
{
FmapFile[0]='\0';
sprintf(FmapFile,"%d",j);
strcat(FmapFile,UserCommandFlag[i]._FmapdpFlag_fmapdp_file);
if((fp = fopen(FmapFile, "r")) == NULL)
{
fprintf(stdout, "%s: error while opening file.\n", FmapFile);
exit_(1);
}
while(fgets(buffer, 80, fp) != NULL)
{
fputs(buffer, outf);
buffer[0]='\0';
}
fclose(fp);
}
fclose(outf);
}
MPI_Barrier(MPI_COMM_WORLD);
printf("Process %d finished off momentum fmap.\n", myid);
#else
fmapdp(UserCommandFlag[i]._FmapdpFlag_fmapdp_file,
UserCommandFlag[i]._FmapdpFlag_nxpoint,
UserCommandFlag[i]._FmapdpFlag_nepoint,
UserCommandFlag[i]._FmapdpFlag_nturn,
UserCommandFlag[i]._FmapdpFlag_xmax,
UserCommandFlag[i]._FmapdpFlag_emax,
UserCommandFlag[i]._FmapdpFlag_z,
UserCommandFlag[i]._FmapdpFlag_diffusion);
#endif
}
// // if (CodeComparaisonFlag) {
......@@ -539,9 +622,23 @@ int main(int argc, char *argv[]) {
exit_(1);
};
/* calculate momentum acceptance*/
printf("\n Calculate momentum acceptance: \n");
#if MPI_EXEC
/* calculate momentum acceptance*/
//initialization for parallel computing
int myid=0, numprocs=0;
int namelen=0;
char processor_name[MPI_MAX_PROCESSOR_NAME]="";
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
MPI_Get_processor_name(processor_name, &namelen);
printf("Beginning parallel computing: %d of %d is on %s\n", myid, numprocs, processor_name);
printf("\n Calculate momentum acceptance: \n");
MomentumAcceptance(UserCommandFlag[i]._MomentumAccFlag_momacc_file,
MomentumAcceptance_p(UserCommandFlag[i]._MomentumAccFlag_momacc_file,
UserCommandFlag[i]._MomentumAccFlag_istart,
UserCommandFlag[i]._MomentumAccFlag_istop,
UserCommandFlag[i]._MomentumAccFlag_deltaminp,
......@@ -551,7 +648,127 @@ int main(int argc, char *argv[]) {
UserCommandFlag[i]._MomentumAccFlag_deltamaxn,
UserCommandFlag[i]._MomentumAccFlag_nstepn,
UserCommandFlag[i]._MomentumAccFlag_nturn,
UserCommandFlag[i]._MomentumAccFlag_zmax);
UserCommandFlag[i]._MomentumAccFlag_zmax,
numprocs,myid);
//merge the files
//Synchronize all processes: wait until every process has finished its part of the calculation,
//then the per-process files can be merged.
MPI_Barrier(MPI_COMM_WORLD);
//Collect the data from the files written by each process and merge them into the user-defined file
if(myid==0)
{
//merge the phase.out files from the different processes
FILE *outf1; //phase.out, for debugging
FILE *fp1;
char buffer1[81];
char PhaseFile[50];
if ((outf1 = fopen("phase_p.out", "w")) == NULL)
{
fprintf(stdout, "psoltracy: error while opening file %s\n", "phase_p.out");
exit_(1);
}
for(int j=0; j<numprocs; j++)
{
PhaseFile[0]='\0';
sprintf(PhaseFile,"%d",j);
strcat(PhaseFile,"phase.out");
if((fp1 = fopen(PhaseFile, "r")) == NULL){
fprintf(stdout, "%s: error while opening file.\n", PhaseFile);
exit_(1);
}
while(fgets(buffer1, 80, fp1) != NULL)
{
fputs(buffer1, outf1);
buffer1[0]='\0';
}
fclose(fp1);
}
//collect data for the momentum acceptance
FILE *outf2; //momentum acceptance
FILE *fp2;
char buffer2[81];
char MAFile[50];
if ((outf2 = fopen(UserCommandFlag[i]._MomentumAccFlag_momacc_file, "w")) == NULL)
{
fprintf(stdout, "psoltracy: error while opening file %s\n", UserCommandFlag[i]._MomentumAccFlag_momacc_file);
exit_(1);
}
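// Each per-rank file holds the positive-momentum results first, then a line containing "Negative",
// then the negative-momentum results. The merged file keeps that layout: the positive blocks of
// all ranks are copied first, and the negative blocks are copied in a second pass below.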
//Collect data in Positive region
for(int j=0; j<numprocs; j++)
{
MAFile[0]='\0';
sprintf(MAFile,"%d",j);
strcat(MAFile,UserCommandFlag[i]._MomentumAccFlag_momacc_file);
if((fp2 = fopen(MAFile, "r")) == NULL)
{
fprintf(stdout, "%s: error while opening file.\n", MAFile);
exit_(1);
}
while(fgets(buffer2, 80, fp2) != NULL)
{
if(strstr(buffer2,"Negative")!=0) break;
fputs(buffer2, outf2);
buffer2[0]='\0'; //reset buffer to NULL
}
buffer2[0]='\0';
fclose(fp2);
}
fprintf(outf2,"\n"); // A void line
//Collect data in Negative region
for(int j=0; j<numprocs; j++)
{
MAFile[0]='\0';
sprintf(MAFile,"%d",j);
strcat(MAFile,UserCommandFlag[i]._MomentumAccFlag_momacc_file);
if((fp2 = fopen(MAFile, "r")) == NULL)
{
fprintf(stdout, "%s: error while opening file.\n", MAFile);
exit_(1);
}
bool found=false;
while(fgets(buffer2, 80, fp2) != NULL)
{
if( (strstr(buffer2,"Negative")==0) && (!found)) { buffer2[0]='\0'; continue;}
if( (strstr(buffer2,"Negative")!=0) ) { found=true; buffer2[0]='\0'; continue;}
fputs(buffer2, outf2);
buffer2[0]='\0';
}
buffer2[0]='\0';
fclose(fp2);
}
fclose(outf2);
}
MPI_Barrier(MPI_COMM_WORLD);
printf("process %d finished momentum acceptance.\n", myid);
#else
MomentumAcceptance(UserCommandFlag[i]._MomentumAccFlag_momacc_file,
UserCommandFlag[i]._MomentumAccFlag_istart,
UserCommandFlag[i]._MomentumAccFlag_istop,
UserCommandFlag[i]._MomentumAccFlag_deltaminp,
UserCommandFlag[i]._MomentumAccFlag_deltamaxp,
UserCommandFlag[i]._MomentumAccFlag_nstepp,
UserCommandFlag[i]._MomentumAccFlag_deltaminn,
UserCommandFlag[i]._MomentumAccFlag_deltamaxn,
UserCommandFlag[i]._MomentumAccFlag_nstepn,
UserCommandFlag[i]._MomentumAccFlag_nturn,
UserCommandFlag[i]._MomentumAccFlag_zmax);
#endif
/* restore the initial values*/
globval.Cavity_on = cavityflag;
......@@ -765,11 +982,16 @@ Based on parts of functions get_param( ) & ID_corr(), etc in nsls-ii_lib.cc
printlatt("linlat1.out");
}
else
printf("Wrong!!!!!");
else{
printf("Command string %s is invalid!\n", CommandStr);
exit_(1);
}//report the invalid command and abort
}//end of looking for user defined flag
#if MPI_EXEC
MPI_Finalize(); //finish parallel computation
#endif
return 0;
}//end of main()