From 8593f10219a5d907ee7523d95d6e2f47cc6eb7c9 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Tue, 23 Jul 2019 10:20:32 +0200 Subject: [PATCH 01/64] PadmeTrig: changed timepix_shutter_delay default to 0x02 --- PadmeTrig/src/Config.c | 4 ++-- PadmeTrig/src/Trigger.c | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/PadmeTrig/src/Config.c b/PadmeTrig/src/Config.c index d896a795..64cffa0f 100644 --- a/PadmeTrig/src/Config.c +++ b/PadmeTrig/src/Config.c @@ -49,8 +49,8 @@ int reset_config() Config->busy_mask = 0x10; // Only CPU busy active - // Set timepix shutter delay to 0 (no need for it as the BTF trigger is tuned for the timepix) - Config->timepix_shutter_delay = 0x00; + // Set timepix shutter delay to 2 (25ns) (WARNING: do not set it to 0 as delay will become 25us!) + Config->timepix_shutter_delay = 0x02; // Leave timepix shutter width to its default value (0x64 = 10us) Config->timepix_shutter_width = 0x64; diff --git a/PadmeTrig/src/Trigger.c b/PadmeTrig/src/Trigger.c index 17564f71..af1c9f9d 100644 --- a/PadmeTrig/src/Trigger.c +++ b/PadmeTrig/src/Trigger.c @@ -501,6 +501,7 @@ int trig_set_timepix_delay(unsigned char delay) { int rc; unsigned char fullmask[4]; + if (delay == 0) { printf("WARNING - trig_set_timepix_delay - delay set to 0: timepix shutter will start ~25us after the BTF trigger. 
Are you sure?\n"); } rc = trig_get_register(0x05,fullmask); if (rc != TRIG_OK) return rc; // Replace old timepix delay with new one From fe0a174757a44e1814b4bda1ce4e723146c33069 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Tue, 23 Jul 2019 15:37:25 +0200 Subject: [PATCH 02/64] PadmeTrig: added function to get firmware version; show firmware verion at startup --- PadmeTrig/include/Trigger.h | 2 ++ PadmeTrig/src/PadmeTrig.c | 42 ++++++++++++++++++++++++++++--------- PadmeTrig/src/Trigger.c | 11 ++++++++++ 3 files changed, 45 insertions(+), 10 deletions(-) diff --git a/PadmeTrig/include/Trigger.h b/PadmeTrig/include/Trigger.h index 26b2f422..7403a7a7 100644 --- a/PadmeTrig/include/Trigger.h +++ b/PadmeTrig/include/Trigger.h @@ -13,6 +13,8 @@ int trig_get_temperature(float*); // temperature int trig_start_run(); int trig_stop_run(); +int trig_get_fw_version(unsigned int*); // 32b firmware version + int trig_get_trigbusymask(unsigned char*); // 32b register 0x02: obsolete do not use int trig_get_trigmask(unsigned char*); // 8b trigger mask diff --git a/PadmeTrig/src/PadmeTrig.c b/PadmeTrig/src/PadmeTrig.c index 687cb192..4d04ce39 100644 --- a/PadmeTrig/src/PadmeTrig.c +++ b/PadmeTrig/src/PadmeTrig.c @@ -214,6 +214,16 @@ void timespec_diff(const struct timespec *start, const struct timespec *stop, return; } +void show_register(unsigned char reg) +{ + unsigned char mask[4]; + if ( trig_get_register(reg,mask) != TRIG_OK ) { + printf("PadmeTrig *** ERROR *** Problem while readying register 0x%02x. Exiting.\n",reg); + proc_finalize(1,1,1,1,DB_STATUS_INIT_FAIL); + } + printf("Current register 0x%02x: 0x%02x%02x%02x%02x\n",reg,mask[0],mask[1],mask[2],mask[3]); +} + int main(int argc, char *argv[]) { pid_t pid; @@ -445,12 +455,22 @@ int main(int argc, char *argv[]) { // Show registers before configuring board for (reg=0x00;reg<0x10;reg++) { - if ( trig_get_register(reg,mask) != TRIG_OK ) { - printf("PadmeTrig *** ERROR *** Problem while readying register 0x%02x. 
Exiting.\n",reg); - proc_finalize(1,1,1,1,DB_STATUS_INIT_FAIL); - } - printf("Current register 0x%02x: 0x%02x%02x%02x%02x\n",reg,mask[0],mask[1],mask[2],mask[3]); + //if ( trig_get_register(reg,mask) != TRIG_OK ) { + // printf("PadmeTrig *** ERROR *** Problem while readying register 0x%02x. Exiting.\n",reg); + // proc_finalize(1,1,1,1,DB_STATUS_INIT_FAIL); + //} + //printf("Current register 0x%02x: 0x%02x%02x%02x%02x\n",reg,mask[0],mask[1],mask[2],mask[3]); + show_register(reg); + } + show_register(0x1D); + + // Show firmware version + unsigned int fw_ver; + if ( trig_get_fw_version(&fw_ver) != TRIG_OK ) { + printf("PadmeTrig *** ERROR *** Problem while reading firmware version. Exiting.\n"); + proc_finalize(1,1,1,1,DB_STATUS_INIT_FAIL); } + printf("Firmware version: 0x%04x (%d)\n",fw_ver,fw_ver); // Program Trigger module with current configuration @@ -622,12 +642,14 @@ int main(int argc, char *argv[]) { // Show registers after initializing board for (reg=0x00;reg<0x10;reg++) { - if ( trig_get_register(reg,mask) != TRIG_OK ) { - printf("PadmeTrig *** ERROR *** Problem while readying register 0x%02x. Exiting.\n",reg); - proc_finalize(1,1,1,1,DB_STATUS_INIT_FAIL); - } - printf("Current register 0x%02x: 0x%02x%02x%02x%02x\n",reg,mask[0],mask[1],mask[2],mask[3]); + //if ( trig_get_register(reg,mask) != TRIG_OK ) { + // printf("PadmeTrig *** ERROR *** Problem while readying register 0x%02x. 
Exiting.\n",reg); + // proc_finalize(1,1,1,1,DB_STATUS_INIT_FAIL); + //} + //printf("Current register 0x%02x: 0x%02x%02x%02x%02x\n",reg,mask[0],mask[1],mask[2],mask[3]); + show_register(reg); } + show_register(0x1D); // If using STREAM output, open stream now to avoid DAQ locking if ( strcmp(Config->output_mode,"STREAM")==0 ) { diff --git a/PadmeTrig/src/Trigger.c b/PadmeTrig/src/Trigger.c index af1c9f9d..eb37933e 100644 --- a/PadmeTrig/src/Trigger.c +++ b/PadmeTrig/src/Trigger.c @@ -77,6 +77,8 @@ // // 0x19 RO busy register // [3:0] busy_in (3:0), [4] CPU busy +// +// 0x1D RO firmware version // === Trigger data format (64bit) === // [39:0] timestamp in clock cycles, resets in 13744s = 3h49m @@ -381,6 +383,15 @@ int trig_stop_run() return trig_set_register(0x00,fullmask); } +int trig_get_fw_version(unsigned int* fw_version) +{ + int rc; + unsigned char fullmask[4]; + rc = trig_get_register(0x1D,fullmask); + if (rc == TRIG_OK) *fw_version = fullmask[0]*(1<<24)+fullmask[1]*(1<<16)+fullmask[2]*(1<<8)+fullmask[3]; + return rc; +} + int trig_get_trigbusymask(unsigned char* mask) { // WARNING this function is obsolete and will be removed From 62ddacfd92fdd1de76e5265df0880e510c7aedeb Mon Sep 17 00:00:00 2001 From: taruggi Date: Thu, 8 Aug 2019 18:20:20 +0200 Subject: [PATCH 03/64] Fixed bug on SAC map --- PadmeReco/SAC/src/DigitizerChannelSAC.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/PadmeReco/SAC/src/DigitizerChannelSAC.cc b/PadmeReco/SAC/src/DigitizerChannelSAC.cc index fcf48ef3..877f5ad5 100644 --- a/PadmeReco/SAC/src/DigitizerChannelSAC.cc +++ b/PadmeReco/SAC/src/DigitizerChannelSAC.cc @@ -108,7 +108,7 @@ Double_t DigitizerChannelSAC::CalcPedestal() { double avg; Int_t fCh = GetChID(); UInt_t fTrigMask= GetTrigMask(); - Int_t ElCh = fCh/10*5 +fCh%5; + Int_t ElCh = fCh/10 +fCh%10*5; // std::cout< &hitArray,USh static Double_t AbsSamRec[1024]; Int_t fCh = GetChID(); - Int_t ElCh = fCh/10*5 +fCh%5; + Int_t ElCh = fCh/10 +fCh%10*5; fAvg80 = 
TMath::Mean(80,&fSamples[0]); // check the number of samples used depending on trigger offsets. for(UShort_t s=0;s Date: Mon, 4 Nov 2019 15:17:14 +0100 Subject: [PATCH 04/64] Now PadmeReco does not crash if there is some non-existing file in input, it will work only on existing ones. --- PadmeReco/PadmeReco.cc | 55 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 47 insertions(+), 8 deletions(-) diff --git a/PadmeReco/PadmeReco.cc b/PadmeReco/PadmeReco.cc index 308f04b1..77acb16e 100644 --- a/PadmeReco/PadmeReco.cc +++ b/PadmeReco/PadmeReco.cc @@ -3,6 +3,7 @@ #include #include #include +#include #include "TFile.h" #include "TChain.h" @@ -39,6 +40,9 @@ void sighandler(int sig){ exit(0); } + +int run_name(std::string lin, std::string& name); + int main(Int_t argc, char **argv) { signal(SIGXCPU,sighandler); @@ -116,23 +120,44 @@ int main(Int_t argc, char **argv) return 0; } + std::vector file_names; TObjArray InputFileNameList; if(stat(Form(InputListFileName.Data()), &filestat) == 0) { //-l option used - std::ifstream InputList(InputListFileName.Data()); - while(InputFileName.ReadLine(InputList) && iFile < NFiles){ -// if(stat(Form(InputFileName.Data()), &filestat) == 0) - InputFileNameList.Add(new TObjString(InputFileName.Data())); - iFile++; - } - } else if(InputFileName.CompareTo("")) //-i option used -// if(stat(Form(InputFileName.Data()), &filestat) == 0) + std::ifstream InputList(InputListFileName.Data()); + while(InputFileName.ReadLine(InputList) && iFile < NFiles){ + //if(stat(Form(InputFileName.Data()), &filestat) == 0) + /*std::cout<<"InputFileName "< Date: Wed, 6 Nov 2019 14:52:38 +0100 Subject: [PATCH 05/64] Added automatic energy calibration based on event time. Old method still holds, but with complete file name. 
--- PadmeReco/ECal/include/ECalCalibHandler.hh | 58 ++++++ PadmeReco/ECal/include/ECalCalibration.hh | 21 +- PadmeReco/ECal/include/ECalReconstruction.hh | 2 +- PadmeReco/ECal/src/ECalCalibHandler.cc | 179 ++++++++++++++++++ PadmeReco/ECal/src/ECalCalibration.cc | 154 +++++++++------ PadmeReco/ECal/src/ECalReconstruction.cc | 29 +++ .../ECalEnergyCalibTimeIntervals.txt | 2 + 7 files changed, 388 insertions(+), 57 deletions(-) create mode 100644 PadmeReco/ECal/include/ECalCalibHandler.hh create mode 100644 PadmeReco/ECal/src/ECalCalibHandler.cc create mode 100644 PadmeReco/config/Calibration/ECalEnergyCalibTimeIntervals.txt diff --git a/PadmeReco/ECal/include/ECalCalibHandler.hh b/PadmeReco/ECal/include/ECalCalibHandler.hh new file mode 100644 index 00000000..ee0b35c7 --- /dev/null +++ b/PadmeReco/ECal/include/ECalCalibHandler.hh @@ -0,0 +1,58 @@ +#ifndef ECalCalibHandler_h +#define ECalCalibHandler_h + +#include "TTimeStamp.h" + +#include +#include +#include +#include +#include +#include "cstdlib" + +#define ECALCALIBHANDLER_NBOARD 32 +#define ECALCALIBHANDLER_NCHAN 32 + + +class ECalCalibHandler { + +public : + + static ECalCalibHandler* GetInstance(); + ~ECalCalibHandler(); + +private: + + static ECalCalibHandler* fInstance; + +protected: + + ECalCalibHandler(); + +public: + + // List of calib files + void SetFileList(std::string list) {fList = list;} + std::string GetFileList(){return fList;} + + void Initialise(); + + // time interval in which the event falls + void SetTimeInterval(TTimeStamp); + + // calibration constants for (BD_id, CH_id) from file + void SetCalibVal(std::string); + double GetCalibVal(int BD, int CH){return fCalibVal[BD][CH];} + +private : + + std::string fList; + TTimeStamp fCurTStart; + TTimeStamp fCurTEnd; + // < , calib_file > + std::map , std::string> fFileMap; + double fCalibVal[ECALCALIBHANDLER_NBOARD][ECALCALIBHANDLER_NCHAN]; + + +}; +#endif diff --git a/PadmeReco/ECal/include/ECalCalibration.hh 
b/PadmeReco/ECal/include/ECalCalibration.hh index 3ea6ac32..5fefd6c3 100644 --- a/PadmeReco/ECal/include/ECalCalibration.hh +++ b/PadmeReco/ECal/include/ECalCalibration.hh @@ -2,13 +2,17 @@ // History: // // Created by Stefania Spagnolo (stefania.spagnolo@le.infn.it) 2019-03-14 -// +// Modified by Gabriele Piperno (gabriele.piperno@roma1.infn.it) 2019-10-10 // -------------------------------------------------------------- #ifndef ECalCalibration_H #define ECalCalibration_H #include "PadmeVCalibration.hh" +#include "ECalCalibHandler.hh" +class TRawEvent; + #include +#include class ECalCalibration : public PadmeVCalibration @@ -20,9 +24,20 @@ public: ~ECalCalibration(); void Init(PadmeVRecoConfig *cfg, RecoVChannelID *chIdMgr ); void ReadCalibConstant(); - void PerformCalibration(std::vector &hitArray); + void PerformCalibration(std::vector &hitArray,TRawEvent* rawEv); + int SearchCalibFile(); + void SetEventTime(TTimeStamp ET){fEvTime = ET;} + TTimeStamp GetEventTime(){return fEvTime;} + private: + + ECalCalibHandler* fCalibHandler; + + TTimeStamp fEvTime; + + std::string fCalibList; + Int_t fUseCalibE; Int_t fUseCalibT; @@ -37,7 +52,7 @@ private: double fHitTCorrected; double fCalibConst; - int fCalibVersion; + std::string fCalibVersion; double fBID; double fChID; diff --git a/PadmeReco/ECal/include/ECalReconstruction.hh b/PadmeReco/ECal/include/ECalReconstruction.hh index 8ce88e9b..b57cb29a 100644 --- a/PadmeReco/ECal/include/ECalReconstruction.hh +++ b/PadmeReco/ECal/include/ECalReconstruction.hh @@ -24,7 +24,7 @@ public: // virtual void Init(PadmeVReconstruction*); // void Init(PadmeVReconstruction* MainReco); // virtual void ProcessEvent(TMCVEvent*,TMCEvent*); - // virtual void ProcessEvent(TRawEvent*); + void ProcessEvent(TRawEvent*); void BuildHits(TRawEvent* rawEv); Double_t CompensateMissingE(Double_t ECl,Int_t ClSeed); //M. 
Raggi 21/05/2019 virtual void BuildClusters(); diff --git a/PadmeReco/ECal/src/ECalCalibHandler.cc b/PadmeReco/ECal/src/ECalCalibHandler.cc new file mode 100644 index 00000000..95758db8 --- /dev/null +++ b/PadmeReco/ECal/src/ECalCalibHandler.cc @@ -0,0 +1,179 @@ +#include "ECalCalibHandler.hh" +#include "TTimeStamp.h" + +#include +#include +#include +#include +#include +#include +#include "cstdlib" + +ECalCalibHandler* ECalCalibHandler::fInstance = 0; + +ECalCalibHandler* ECalCalibHandler::GetInstance() +{ + if ( fInstance == 0 ) { fInstance = new ECalCalibHandler(); } + return fInstance; +} + + + + +ECalCalibHandler::ECalCalibHandler() +{ + // default list file + fList = "config/Calibration/ECalEnergyCalibTimeIntervals.txt"; + + // set to 0 all the calib constants + for (size_t ii = 0; ii < ECALCALIBHANDLER_NBOARD; ++ii){ + for (size_t jj = 0; jj < ECALCALIBHANDLER_NCHAN; ++jj){ + + fCalibVal[ii][jj] = 0.; + } + } + + // set to 0 start and end point + fCurTStart.Set(0,0,0,1,0); + fCurTEnd.Set(0,0,0,1,0); + +} + + + + +ECalCalibHandler::~ECalCalibHandler() +{;} // do I have to free fFileMap and fCalibVal? + + + + +void ECalCalibHandler::Initialise() +{ + + std::string line; + std::ifstream listfile(fList); + + if(listfile.is_open()){ // open list file and fill corresponding map + while(getline(listfile,line)){ + int Ti_d, Ti_h, Te_d, Te_h; + TTimeStamp Ti, Te; + std::string file; + std::stringstream ss(line); + ss>>Ti_d>>Ti_h>>Te_d>>Te_h>>file; + + Ti.Set(Ti_d,Ti_h,0,1,0); + Te.Set(Te_d,Te_h,0,1,0); + fFileMap[std::make_pair(Ti,Te)] = file; + } + + listfile.close(); + + } else { // list file not found + std::cout<<"\nWARNING!!! 
File "<, std::string>::iterator FM_it; + for(FM_it = fFileMap.begin();FM_it!=fFileMap.end();++FM_it){ + if( ((FM_it->first).first <= T_ev) && (T_ev <= (FM_it->first).second) ){ + + // load calib const + SetCalibVal(FM_it->second); + + + // set new interval start and end points + fCurTStart = (FM_it->first).first; + fCurTEnd = (FM_it->first).second; + + // found interval + found = true; + + std::cout<<"Setting ECal energy calibration file to "<second + <<". Valid from "<> row >> col >> BD >> CH >> QMIP; + + // set the values + fCalibVal[BD][CH] = 1./QMIP; + } + + } else { // impossible to open calibration file + std::cout<<"WARNING!!! Impossible to open ECal energy calibration file "< + ECalCalibration::ECalCalibration() : PadmeVCalibration() @@ -16,6 +19,8 @@ ECalCalibration::ECalCalibration() } + + ECalCalibration::~ECalCalibration() { fT0Map.clear(); @@ -23,108 +28,151 @@ ECalCalibration::~ECalCalibration() } + void ECalCalibration::Init(PadmeVRecoConfig *cfg, RecoVChannelID *chIdMgr ){ fUseCalibE = (int)cfg->GetParOrDefault("EnergyCalibration","UseCalibration",1); fGlobEnScale = (double)cfg->GetParOrDefault("EnergyCalibration","AveragepCMeV",15.); + fCalibList = (std::string)cfg->GetParOrDefault("EnergyCalibration","EnergyCalibIntervalsList","ECalEnergyCalibTimeIntervals.txt"); + fCalibVersion = (std::string)cfg->GetParOrDefault("EnergyCalibration","CalibVersion","0"); fUseCalibT = (int)cfg->GetParOrDefault("TimeAlignment","UseTimeAlignment",1); - fCalibVersion = (int)cfg->GetParOrDefault("EnergyCalibration","CalibVersion",3); - // Energy calibration - - if(fUseCalibE) { - char fname[256]; - sprintf(fname,"config/Calibration/ECalEnergyCalibration_%d.dat",fCalibVersion); - ECalib.open(fname); - } - // if(fUseCalibE==1) ECalib.open("config/Calibration/ECalCalibConst.txt"); - // if(fUseCalibE==2) ECalib.open("config/Calibration/equalization_constants2.dat"); + + + // Energy calibration // + + if(fUseCalibE){ + + fMuonDepositedEnergy=17.5; + fGlobEnScale=15; - 
if(fUseCalibE>0 && !ECalib.is_open()){ - // if(fUseCalibE==1 && !ECalib.is_open()){ - std::cout<<"ERROR: Cannot find ECal calibration file "<<"**************"<SetFileList(path); + std::cout<<"List file in use: "<GetFileList()<Initialise(); + + } else if(fCalibVersion!="0") { // same calib file for all the events + + char fname[256]; + //sprintf(fname,"config/Calibration/ECalEnergyCalibration_%d.dat",fCalibVersion); + sprintf(fname,"config/Calibration/%s",fCalibVersion.c_str()); + ECalib.open(fname); + std::cout<<"Using a USER SELECTED energy calibration file for ECal: " + <0 || fUseCalibT==1) ReadCalibConstant(); + + if((fUseCalibE>0 && fCalibVersion!="0") || fUseCalibT==1) ReadCalibConstant(); + } + + void ECalCalibration::ReadCalibConstant() { double MIPCharge,TimeOffSet; int NBD,CID; int row,col; - fMuonDepositedEnergy=17.5; - fGlobEnScale=15; - //Read Energy calibration constants - if(ECalib.is_open()){ - for(int i=0;i<616;i++){ - ECalib >> row >> col >> NBD >> CID >> MIPCharge; //reads Piperno informations need cross-check - fCalibMap[std::make_pair(NBD,CID)] = MIPCharge/(fMuonDepositedEnergy*fGlobEnScale); - // fCalibMap[std::make_pair(row,col)] = MIPCharge/(fMuonDepositedEnergy*fGlobEnScale); - //std::cout<> row >> col >> NBD >> CID >> MIPCharge; //reads Piperno informations need cross-check + fCalibMap[std::make_pair(NBD,CID)] = MIPCharge/(fMuonDepositedEnergy*fGlobEnScale); + //fCalibMap[std::make_pair(row,col)] = MIPCharge/(fMuonDepositedEnergy*fGlobEnScale); + //std::cout<> row >> col >> NBD >> CID>>TimeOffSet; //reads Piperno informations need cross-check + TCalib >> row >> col >> NBD >> CID>>TimeOffSet; fT0Map[std::make_pair(NBD,CID)] = TimeOffSet; - // fCalibMap[std::make_pair(row,col)] = MIPCharge/(fMuonDepositedEnergy*fGlobEnScale); + //fCalibMap[std::make_pair(row,col)] = MIPCharge/(fMuonDepositedEnergy*fGlobEnScale); //std::cout< &Hits) +void ECalCalibration::PerformCalibration(std::vector &Hits, TRawEvent* rawEv) { static int PRINTED = 0; 
for(unsigned int iHit = 0;iHit < Hits.size();++iHit){ + + // Energy calibration // if (fUseCalibE > 0){ + int ich = Hits[iHit]->GetChannelId(); //need to convert into BDID e CHID unsigned int BD = Hits[iHit]->getBDid(); - unsigned int ChID = Hits[iHit]->getCHid(); - // Correcting for different crystals response + unsigned int ChID = Hits[iHit]->getCHid(); + fHitE = Hits[iHit]->GetEnergy(); - if(fCalibMap[std::make_pair(BD,ChID)]!=0){ - fHitECalibrated= fHitE/fCalibMap[std::make_pair(BD,ChID)]; + + // Correcting for different crystals response + if(fCalibVersion=="0"){ + TTimeStamp time = rawEv->GetEventAbsTime(); + /* + std::cout<<"Hit "<SetTimeInterval(time); + fHitECalibrated = fHitE*(fCalibHandler->GetCalibVal(BD,ChID))*fMuonDepositedEnergy*fGlobEnScale; Hits[iHit]->SetEnergy(fHitECalibrated); - // std::cout<<"channel ID "<GetCalibVal(BD,ChID))*fMuonDepositedEnergy*fGlobEnScale<SetEnergy(fHitECalibrated); + //std::cout<<"channel ID "<GetTime(); fHitTCorrected = fHitT-fT0Map[std::make_pair(fBID,fChID)]; - // std::cout<<"channel ID "<SetTime(fHitTCorrected); } } } - diff --git a/PadmeReco/ECal/src/ECalReconstruction.cc b/PadmeReco/ECal/src/ECalReconstruction.cc index 3a21b6a3..10f04a69 100644 --- a/PadmeReco/ECal/src/ECalReconstruction.cc +++ b/PadmeReco/ECal/src/ECalReconstruction.cc @@ -119,6 +119,35 @@ TRecoVEvent * ECalReconstruction::ProcessEvent(TDetectorVEvent* tEvent, Event* t */ +void ECalReconstruction::ProcessEvent(TRawEvent* rawEv){ + + // use trigger info + if(fTriggerProcessor) { + //std::cout<<"Reconstruction named <"< processing TriggerInfo .... 
"<PerformCalibration(GetRecoHits(),rawEv); + if(fGeometry) fGeometry->ComputePositions(GetRecoHits()); + + // from Hits to Clusters + ClearClusters(); + BuildClusters(); + if(fChannelCalibration) fChannelCalibration->PerformCalibration(GetClusters()); + + //Processing is over, let's analyze what's here, if foreseen + if(fGlobalRecoConfigOptions->IsMonitorMode()) { + AnalyzeEvent(rawEv); + } + +} + + + bool ECalReconstruction::TriggerToBeSkipped() { if ( GetGlobalRecoConfigOptions()->IsRecoMode() && !(GetTriggerProcessor()->IsBTFTrigger()) ) return true; diff --git a/PadmeReco/config/Calibration/ECalEnergyCalibTimeIntervals.txt b/PadmeReco/config/Calibration/ECalEnergyCalibTimeIntervals.txt new file mode 100644 index 00000000..7b4e5b02 --- /dev/null +++ b/PadmeReco/config/Calibration/ECalEnergyCalibTimeIntervals.txt @@ -0,0 +1,2 @@ +20190111 000000 20190113 235959 ECalEnergyCalibration_2.dat +20190227 000000 20190302 000000 ECalEnergyCalibration_4.dat From 568bf66c4c91332f80fbdbc59cd8c4f8be6a4d9f Mon Sep 17 00:00:00 2001 From: Stefania Spagnolo Date: Wed, 13 Nov 2019 16:59:46 +0100 Subject: [PATCH 06/64] improve event selection --- .../AnalysisBase/src/EventSelection.cc | 366 +++++++++++++----- 1 file changed, 277 insertions(+), 89 deletions(-) diff --git a/PadmeAnalysis/AnalysisBase/src/EventSelection.cc b/PadmeAnalysis/AnalysisBase/src/EventSelection.cc index 0338fbe6..12add70b 100644 --- a/PadmeAnalysis/AnalysisBase/src/EventSelection.cc +++ b/PadmeAnalysis/AnalysisBase/src/EventSelection.cc @@ -255,87 +255,6 @@ Bool_t EventSelection::ProcessAnalysisSS() Double_t xEnergy=0; std::string hname; - // if (fPVeto_hitEvent->GetNHits() > 0){ - // for (int hPVeto=0; hPVetoGetNHits(); ++hPVeto) - // { - // xHit = fPVeto_hitEvent->Hit(hPVeto); - // xTime= xHit->GetTime(); - // xChId= xHit->GetChannelId(); - // xEnergy = xHit->GetEnergy(); - // hname=hprefix+"timePVetoVsCh_Hits"; - // hSvc->FillHisto2(hname, xTime, (float)xChId); - // 
//hname=hprefix+"timePVetoVsCh_linearCorr_Hits"; - // //xTimeLinCorr = applyTimePVetoLinCorr((float)xChId, xTime); - // //hSvc->FillHisto2(hname, xTimeLinCorr, (float)xChId); - // for (int hECal=0; hECalGetNHits(); ++hECal) - // { - // yHit = fECal_hitEvent->Hit(hECal); - // yTime= yHit->GetTime(); - // yChId= yHit->GetChannelId(); - // yEne = yHit->GetEnergy(); - // /* - // if (hPVeto==0) - // { - // eSumECalHits = eSumECalHits+yEne; - // hname="ECalEnergyMap_Hits"; - // hSvc->FillHisto2(hname, float(int(yChId/100)), float(yChId%100), yEne); - // hname="ECalMap_Hits"; - // hSvc->FillHisto2(hname, float(int(yChId/100)), float(yChId%100)); - // } - // */ - - // hname=hprefix+"timeECalVsPVeto_Hits"; - // hSvc->FillHisto2(hname, xTime, yTime); - // } - // for (int hSAC=0; hSACGetNHits(); ++hSAC) - // { - // yHit = fSAC_hitEvent->Hit(hSAC); - // if (yHit->GetChannelId()!=21) continue; - - // yTime= yHit->GetTime(); - // hname="DtimePVetoVsSAC21_Hits"; - // hSvc->FillHisto(hname, xTime-30.7-yTime); - // hname="DtimePVetoVsSAC21LinCorr_Hits"; - // hSvc->FillHisto(hname, xTimeLinCorr-yTime); - // } - // for (int cSAC=0; cSACGetNElements(); ++cSAC) - // { - // yClu = fSAC_ClColl->Element(cSAC); - - // if (yClu->GetEnergy()<50.) continue; - // if ( fabs(yClu->GetTime()-xTimeLinCorr) > 1. ) continue; - - // hname="SACClEVsPVetoHitChId_1ns_linearCorr"; - // hSvc->FillHisto2(hname, (float)xChId, yClu->GetEnergy()); - // if (yClu->GetChannelId()!=21) continue; - // if(xEnergy < 10.) continue; - // if(xEnergy > 50.) 
continue; - // hname="SACClE21VsPVetoHitChId_1ns_linearCorr"; - // hSvc->FillHisto2(hname, (float)xChId, yClu->GetEnergy()); - // } - // } - - // //Cluster Based - // for (int hPVeto=0; hPVetoGetNElements(); ++hPVeto) - // { - // xClu = fPVeto_ClColl->Element(hPVeto); - // xTime= xClu->GetTime(); - // xChId= xClu->GetChannelId(); - // for (int hECal=0; hECalGetNElements(); ++hECal) - // { - // yClu = fECal_ClColl->Element(hECal); - // yTime= yClu->GetTime(); - // yChId= yClu->GetChannelId(); - // yEne = yClu->GetEnergy(); - - // hname="timeECalVsPVeto_Clus"; - // hSvc->FillHisto2(hname, xTime, yTime); - // hname="energyECalVsChIdPVeto_Clus_inTime10"; - // if (fabs(yTime-xTime)<10.) hSvc->FillHisto2(hname, float(xChId), yEne); - - // } - // } - // } double pigreco = acos(-1.); @@ -351,9 +270,18 @@ Bool_t EventSelection::ProcessAnalysisSS() yChId= yHit->GetChannelId(); eSumECalHits = eSumECalHits+yEne; hname=hprefix+"ECalEnergyMap_Hits"; - hSvc->FillHisto2(hname, float(int(yChId/100)), float(yChId%100), yEne); - hname=hprefix+"ECalMap_Hits"; - hSvc->FillHisto2(hname, float(int(yChId/100)), float(yChId%100)); + if (isMC) + { + hSvc->FillHisto2(hname, float(int(yChId%100)), float(yChId/100), yEne); + hname=hprefix+"ECalMap_Hits"; + hSvc->FillHisto2(hname, float(int(yChId%100)), float(yChId/100)); + } + else + { + hSvc->FillHisto2(hname, float(int(yChId/100)), float(yChId%100), yEne); + hname=hprefix+"ECalMap_Hits"; + hSvc->FillHisto2(hname, float(int(yChId/100)), float(yChId%100)); + } } hname=hprefix+"energySumECalHits"; hSvc->FillHisto(hname, eSumECalHits); @@ -422,7 +350,8 @@ Bool_t EventSelection::ProcessAnalysisSS() int n2gindt2_5=0; int n2gindt1=0; int n2g=0; - + int n2gDsume=0; + int n2gFR = 0; //std::cout<<" pointer to collection = "<<(long)fECal_ClColl<GetNElements()<FillHisto(hname, xEne); pos1 = xClu->GetPosition(); + if (isMC) + { + double tno = pos1.y(); + pos1.SetY(pos1.x()); + pos1.SetX(tno); + } // Sum of All Cluster Energy eSumCl = eSumCl+xEne; @@ 
-448,6 +383,9 @@ Bool_t EventSelection::ProcessAnalysisSS() hname=hprefix+"GammaSeleCutFlow"; hSvc->FillHisto(hname,cut_g_all); + hname="SS2g_ClTime"; + hSvc->FillHisto(hname, xTime); + if (xEne > eMax) { iELead = hECal; @@ -479,6 +417,13 @@ Bool_t EventSelection::ProcessAnalysisSS() aChId = aClu->GetChannelId(); aTime = aClu->GetTime(); pos2 = aClu->GetPosition(); + if (isMC) + { + double tno = pos2.y(); + pos2.SetY(pos2.x()); + pos2.SetX(tno); + } + aSumCl = xEne+aEne; if (aSumCl > aSumClMax) @@ -523,7 +468,7 @@ Bool_t EventSelection::ProcessAnalysisSS() } - if (fabs(dt)<10.) + if (fabs(dt)<30.) { hname = "SS2gSumE_passDt"; hSvc->FillHisto(hname, aSumCl); @@ -534,6 +479,30 @@ Bool_t EventSelection::ProcessAnalysisSS() hSvc->FillHisto(hname, xcog); hname = "SS2gYcog_passDt"; hSvc->FillHisto(hname, ycog); + + + hname = "SS2gE_passDt"; + hSvc->FillHisto(hname, xEne, 0.5); + hSvc->FillHisto(hname, aEne, 0.5); + hname = "SS2gX_passDt"; + hSvc->FillHisto(hname, pos1.x(), 0.5); + hSvc->FillHisto(hname, pos2.x(), 0.5); + hname = "SS2gY_passDt"; + hSvc->FillHisto(hname, pos1.y(), 0.5); + hSvc->FillHisto(hname, pos2.y(), 0.5); + hname = "SS2gXEw_passDt"; + hSvc->FillHisto(hname, pos1.x(), xEne); + hSvc->FillHisto(hname, pos2.x(), aEne); + hname = "SS2gYEw_passDt"; + hSvc->FillHisto(hname, pos1.y(), xEne); + hSvc->FillHisto(hname, pos2.y(), aEne); + hname = "SS2gXYEw_passDt"; + hSvc->FillHisto2(hname, pos1.x(), pos1.y(), xEne); + hSvc->FillHisto2(hname, pos2.x(), pos2.y(), aEne); + + + + if (cos(dPhi)FillHisto(hname, ycog); + + hname = "SS2gE_passDtDphi"; + hSvc->FillHisto(hname, xEne, 0.5); + hSvc->FillHisto(hname, aEne, 0.5); + hname = "SS2gX_passDtDphi"; + hSvc->FillHisto(hname, pos1.x(), 0.5); + hSvc->FillHisto(hname, pos2.x(), 0.5); + hname = "SS2gY_passDtDphi"; + hSvc->FillHisto(hname, pos1.y(), 0.5); + hSvc->FillHisto(hname, pos2.y(), 0.5); + hname = "SS2gXEw_passDtDphi"; + hSvc->FillHisto(hname, pos1.x(), xEne); + hSvc->FillHisto(hname, pos2.x(), aEne); + hname = 
"SS2gYEw_passDtDphi"; + hSvc->FillHisto(hname, pos1.y(), xEne); + hSvc->FillHisto(hname, pos2.y(), aEne); + hname = "SS2gXYEw_passDtDphi"; + hSvc->FillHisto2(hname, pos1.x(), pos1.y(), xEne); + hSvc->FillHisto2(hname, pos2.x(), pos2.y(), aEne); + if (fabs(xcog)<20. && fabs(ycog)<20.) { //hname = "SS2gSumE_passdt1dphi10cog10"; @@ -559,6 +548,104 @@ Bool_t EventSelection::ProcessAnalysisSS() hname = "SS2gDphi_passDtDphiCog"; hSvc->FillHisto(hname, dPhi*180/pigreco); n2g+=1; + + + hname = "SS2gE_passDtDphiCog"; + hSvc->FillHisto(hname, xEne, 0.5); + hSvc->FillHisto(hname, aEne, 0.5); + hname = "SS2gX_passDtDphiCog"; + hSvc->FillHisto(hname, pos1.x(), 0.5); + hSvc->FillHisto(hname, pos2.x(), 0.5); + hname = "SS2gY_passDtDphiCog"; + hSvc->FillHisto(hname, pos1.y(), 0.5); + hSvc->FillHisto(hname, pos2.y(), 0.5); + hname = "SS2gXEw_passDtDphiCog"; + hSvc->FillHisto(hname, pos1.x(), xEne); + hSvc->FillHisto(hname, pos2.x(), aEne); + hname = "SS2gYEw_passDtDphiCog"; + hSvc->FillHisto(hname, pos1.y(), xEne); + hSvc->FillHisto(hname, pos2.y(), aEne); + hname = "SS2gXYEw_passDtDphiCog"; + hSvc->FillHisto2(hname, pos1.x(), pos1.y(), xEne); + hSvc->FillHisto2(hname, pos2.x(), pos2.y(), aEne); + + + + if (fabs(pos1.x())>100. && fabs(pos2.x())>100. && fabs(pos1.y())<200. && fabs(pos2.y())<200.) 
{ + n2gFR+=1; + hname = "SS2gSumE_passDtDphiCogFR"; + hSvc->FillHisto(hname, aSumCl); + + hname = "SS2gDt_passDtDphiCogFR"; + hSvc->FillHisto(hname, dt); + hname = "SS2gDphi_passDtDphiCogFR"; + hSvc->FillHisto(hname, dPhi*180/pigreco); + hname = "SS2gXcog_passDtDphiCogFR"; + hSvc->FillHisto(hname, xcog); + hname = "SS2gYcog_passDtDphiCogFR"; + hSvc->FillHisto(hname, ycog); + + + hname = "SS2gE_passDtDphiCogFR"; + hSvc->FillHisto(hname, xEne, 0.5); + hSvc->FillHisto(hname, aEne, 0.5); + hname = "SS2gX_passDtDphiCogFR"; + hSvc->FillHisto(hname, pos1.x(), 0.5); + hSvc->FillHisto(hname, pos2.x(), 0.5); + hname = "SS2gY_passDtDphiCogFR"; + hSvc->FillHisto(hname, pos1.y(), 0.5); + hSvc->FillHisto(hname, pos2.y(), 0.5); + hname = "SS2gXEw_passDtDphiCogFR"; + hSvc->FillHisto(hname, pos1.x(), xEne); + hSvc->FillHisto(hname, pos2.x(), aEne); + hname = "SS2gYEw_passDtDphiCogFR"; + hSvc->FillHisto(hname, pos1.y(), xEne); + hSvc->FillHisto(hname, pos2.y(), aEne); + + + + if (fabs(aSumCl-490.)<50.) + { + hname = "SS2gSumE_passDtDphiCogDsume"; + hSvc->FillHisto(hname, aSumCl); + + hname = "SS2gDt_passDtDphiCogDsume"; + hSvc->FillHisto(hname, dt); + hname = "SS2gDphi_passDtDphiCogDsume"; + hSvc->FillHisto(hname, dPhi*180/pigreco); + hname = "SS2gXcog_passDtDphiCogDsume"; + hSvc->FillHisto(hname, xcog); + hname = "SS2gYcog_passDtDphiCogDsume"; + hSvc->FillHisto(hname, ycog); + + + hname = "SS2gE_passDtDphiCogDsume"; + hSvc->FillHisto(hname, xEne, 0.5); + hSvc->FillHisto(hname, aEne, 0.5); + hname = "SS2gX_passDtDphiCogDsume"; + hSvc->FillHisto(hname, pos1.x(), 0.5); + hSvc->FillHisto(hname, pos2.x(), 0.5); + hname = "SS2gY_passDtDphiCogDsume"; + hSvc->FillHisto(hname, pos1.y(), 0.5); + hSvc->FillHisto(hname, pos2.y(), 0.5); + hname = "SS2gXEw_passDtDphiCogDsume"; + hSvc->FillHisto(hname, pos1.x(), xEne); + hSvc->FillHisto(hname, pos2.x(), aEne); + hname = "SS2gYEw_passDtDphiCogDsume"; + hSvc->FillHisto(hname, pos1.y(), xEne); + hSvc->FillHisto(hname, pos2.y(), aEne); + hname = 
"SS2gXYEw_passDtDphiCogDsume"; + hSvc->FillHisto2(hname, pos1.x(), pos1.y(), xEne); + hSvc->FillHisto2(hname, pos2.x(), pos2.y(), aEne); + + + n2gDsume+=1; + + + + + } + } } } } @@ -778,6 +865,8 @@ Bool_t EventSelection::ProcessAnalysisSS() hSvc->FillHisto(hname,float(n2gindt1)); hname="SS2g_n2g"; hSvc->FillHisto(hname,float(n2g)); + hname="SS2g_n2gFR"; + hSvc->FillHisto(hname,float(n2gFR)); hname=hprefix+"energySumECalClus"; @@ -2570,6 +2659,7 @@ Bool_t EventSelection::InitHistosAnalysis() maxX = 1500.; hSvc->BookHisto(hname, nBinX, minX, maxX); + hname="SS2g_nclus"; hSvc->BookHisto(hname, 50, 0.5, 50.5); hname="SS2g_nclus50"; @@ -2584,8 +2674,17 @@ Bool_t EventSelection::InitHistosAnalysis() hSvc->BookHisto(hname, 10, 0.5, 10.5); hname="SS2g_n2g"; hSvc->BookHisto(hname, 10, 0.5, 10.5); + hname="SS2g_n2gFR"; + hSvc->BookHisto(hname, 10, 0.5, 10.5); + + hname="SS2g_ClTime"; + hSvc->BookHisto(hname, 100, -500, 500); + hname = "SS2gSumE_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gSumE_passDtDphiCogFR"; + hSvc->BookHisto(hname, nBinX, minX, maxX); hname = "SS2gSumE_passDtDphiCog"; hSvc->BookHisto(hname, nBinX, minX, maxX); hname = "SS2gSumE_passDtDphi"; @@ -2593,9 +2692,14 @@ Bool_t EventSelection::InitHistosAnalysis() hname = "SS2gSumE_passDt"; hSvc->BookHisto(hname, nBinX, minX, maxX); + nBinX= 100; minX = -50; maxX = 50.; + hname = "SS2gDt_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gDt_passDtDphiCogFR"; + hSvc->BookHisto(hname, nBinX, minX, maxX); hname = "SS2gDt_passDtDphiCog"; hSvc->BookHisto(hname, nBinX, minX, maxX); hname = "SS2gDt_passDtDphi"; @@ -2604,6 +2708,10 @@ Bool_t EventSelection::InitHistosAnalysis() nBinX= 720; minX = -360; maxX = 360.; + hname = "SS2gDphi_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gDphi_passDtDphiCogFR"; + hSvc->BookHisto(hname, nBinX, minX, maxX); hname = "SS2gDphi_passDtDphiCog"; hSvc->BookHisto(hname, nBinX, minX, 
maxX); hname = "SS2gDphi_passDtDphi"; @@ -2611,11 +2719,92 @@ Bool_t EventSelection::InitHistosAnalysis() hname = "SS2gDphi_passDt"; hSvc->BookHisto(hname, nBinX, minX, maxX); + nBinX= 100; + minX = 50; + maxX = 450.; + hname = "SS2gE_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gE_passDtDphiCogFR"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gE_passDtDphiCog"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gE_passDtDphi"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gE_passDt"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + + nBinX= 50; + minX = -400; + maxX = 400.; + minY = -400; + maxY = 400.; + hname = "SS2gX_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gX_passDtDphiCogFR"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gX_passDtDphiCog"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gX_passDtDphi"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gX_passDt"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gY_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gY_passDtDphiCogFR"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gY_passDtDphiCog"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gY_passDtDphi"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gY_passDt"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + + hname = "SS2gXEw_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gXEw_passDtDphiCogFR"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gXEw_passDtDphiCog"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gXEw_passDtDphi"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gXEw_passDt"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gYEw_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gYEw_passDtDphiCogFR"; + hSvc->BookHisto(hname, 
nBinX, minX, maxX); + hname = "SS2gYEw_passDtDphiCog"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gYEw_passDtDphi"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gYEw_passDt"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + + nBinX= 100; + nBinY= 100; + hname = "SS2gXYEw_passDtDphiCogDsume"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + hname = "SS2gXYEw_passDtDphiCog"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + hname = "SS2gXYEw_passDtDphi"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + hname = "SS2gXYEw_passDt"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + + nBinX= 100; minX = -200; maxX = 200.; + hname = "SS2gXcog_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gXcog_passDtDphiCogFR"; + hSvc->BookHisto(hname, nBinX, minX, maxX); hname = "SS2gXcog_passDtDphi"; hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gYcog_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gYcog_passDtDphiCogFR"; + hSvc->BookHisto(hname, nBinX, minX, maxX); hname = "SS2gYcog_passDtDphi"; hSvc->BookHisto(hname, nBinX, minX, maxX); hname = "SS2gXcog_passDt"; @@ -2623,7 +2812,6 @@ Bool_t EventSelection::InitHistosAnalysis() hname = "SS2gYcog_passDt"; hSvc->BookHisto(hname, nBinX, minX, maxX); - hname = "ECal2gsearch_ESumDt3"; hSvc->BookHisto(hname, nBinX, minX, maxX); hname = "ECal2gsearch_ESumDt3Phi100"; @@ -2819,8 +3007,8 @@ Bool_t EventSelection::InitHistosAnalysis() hSvc->BookHisto(hname, nBinX, minX, maxX); hname="TimeSpreadInECal"; nBinX= 200; - minX = -30.; - maxX = 30.; + minX = -300.; + maxX = 300.; hSvc->BookHisto(hname, nBinX, minX, maxX); hname="CellSpreadInECal"; nBinX= 30; From ce628b59b7c84dbc225dc68afda7f5ff06db7702 Mon Sep 17 00:00:00 2001 From: Stefania Spagnolo Date: Fri, 15 Nov 2019 12:46:01 +0100 Subject: [PATCH 07/64] update to the eventselection --- .../AnalysisBase/src/EventSelection.cc 
| 37 +++++++++++++++---- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/PadmeAnalysis/AnalysisBase/src/EventSelection.cc b/PadmeAnalysis/AnalysisBase/src/EventSelection.cc index 12add70b..501670ab 100644 --- a/PadmeAnalysis/AnalysisBase/src/EventSelection.cc +++ b/PadmeAnalysis/AnalysisBase/src/EventSelection.cc @@ -341,6 +341,10 @@ Bool_t EventSelection::ProcessAnalysisSS() double impactPar=0.; + int nhit=fECal_hitEvent->GetNHits(); + hname="SS2g_nhits"; + hSvc->FillHisto(hname,float(nhit)); + int nclus=fECal_ClColl->GetNElements(); hname="SS2g_nclus"; hSvc->FillHisto(hname,float(nclus)); @@ -368,6 +372,12 @@ Bool_t EventSelection::ProcessAnalysisSS() hname = hprefix+"ECalClEnergy"; hSvc->FillHisto(hname, xEne); + int cls = xClu->GetNHitsInClus(); + hname = "SS2g_clSize"; + hSvc->FillHisto(hname, float(cls)); + hname = "SS2g_clSizeVsE"; + hSvc->FillHisto2(hname, xEne, float(cls)); + pos1 = xClu->GetPosition(); if (isMC) { @@ -468,7 +478,7 @@ Bool_t EventSelection::ProcessAnalysisSS() } - if (fabs(dt)<30.) + if (fabs(dt)<10.) { hname = "SS2gSumE_passDt"; hSvc->FillHisto(hname, aSumCl); @@ -571,7 +581,8 @@ Bool_t EventSelection::ProcessAnalysisSS() - if (fabs(pos1.x())>100. && fabs(pos2.x())>100. && fabs(pos1.y())<200. && fabs(pos2.y())<200.) { + // if (fabs(pos1.y())>120. && fabs(pos2.y())>120.) { + // if (fabs(pos1.x())>100. && fabs(pos2.x())>100. && fabs(pos1.y())<200. && fabs(pos2.y())<200.) 
{ n2gFR+=1; hname = "SS2gSumE_passDtDphiCogFR"; hSvc->FillHisto(hname, aSumCl); @@ -640,12 +651,14 @@ Bool_t EventSelection::ProcessAnalysisSS() n2gDsume+=1; - - - + hname="SS2g_ClTime_passDtDphiCogDsume"; + hSvc->FillHisto(hname, xTime); + hname = "NposInBunch_beam_passDtDphiCogDsume"; + hSvc->FillHisto(hname,fTarget_RecoBeam->getnPOT()); + } - } + //} } } } @@ -2601,6 +2614,9 @@ Bool_t EventSelection::InitHistosAnalysis() hSvc->BookHisto(hname, 3, -1.5, 1.5); hname="NposInBunch_beam"; hSvc->BookHisto(hname, 500, 0., 30000.); + hname="NposInBunch_beam_passDtDphiCogDsume"; + hSvc->BookHisto(hname, 500, 0., 30000.); + hname="GammaSeleCutFlow"; hSvc->BookHisto(hname, 31, -0.5, 30.5); hname="GammaSeleCutFlow"; @@ -2660,6 +2676,12 @@ Bool_t EventSelection::InitHistosAnalysis() hSvc->BookHisto(hname, nBinX, minX, maxX); + hname="SS2g_nhits"; + hSvc->BookHisto(hname, 100, 0., 400.); + hname="SS2g_clSize"; + hSvc->BookHisto(hname, 31, -0.5, 30.5); + hname="SS2g_clSizeVsE"; + hSvc->BookHisto2(hname, 100, 0., 500., 31, -0.5, 30.5); hname="SS2g_nclus"; hSvc->BookHisto(hname, 50, 0.5, 50.5); hname="SS2g_nclus50"; @@ -2679,7 +2701,8 @@ Bool_t EventSelection::InitHistosAnalysis() hname="SS2g_ClTime"; hSvc->BookHisto(hname, 100, -500, 500); - + hname="SS2g_ClTime_passDtDphiCogDsume"; + hSvc->BookHisto(hname, 100, -500, 500); hname = "SS2gSumE_passDtDphiCogDsume"; hSvc->BookHisto(hname, nBinX, minX, maxX); From 54e1ceb1ae5be23966ca346a3ac2fb00776aa242 Mon Sep 17 00:00:00 2001 From: Stefania Spagnolo Date: Fri, 15 Nov 2019 13:16:22 +0100 Subject: [PATCH 08/64] plotting macros --- PadmeAnalysis/AnalysisTools/compare.C | 170 +++++++++++++++--- .../AnalysisTools/produceSelectionPlots.C | 153 ++++++++++++++++ 2 files changed, 294 insertions(+), 29 deletions(-) create mode 100644 PadmeAnalysis/AnalysisTools/produceSelectionPlots.C diff --git a/PadmeAnalysis/AnalysisTools/compare.C b/PadmeAnalysis/AnalysisTools/compare.C index 684e74b9..64158111 100644 --- 
a/PadmeAnalysis/AnalysisTools/compare.C +++ b/PadmeAnalysis/AnalysisTools/compare.C @@ -1,10 +1,53 @@ -void compare(std::string hname, double scalef=-1, TFile* fData=_file0, TFile* fMC=_file1) +#include +#include + +#include "Rtypes.h" + +#include "PadmeUtils.h" +#include "PadmeStyle.h" +#include "PadmeLabels.h" + +/* +#ifdef __CLING__ +// these are not headers - do not treat them as such - needed for ROOT6 +#include "PadmeLabels.C" +#include "PadmeUtils.C" +#endif +*/ + +#include "TCanvas.h" +#include "TFile.h" +#include "TROOT.h" +#include "TH1F.h" +#include "TRandom.h" +#include "TGraphErrors.h" + +using namespace std; + + +void compare(std::string hname, double scalef=-1, double xmin=-999, double xmax=999, + std::string xtitle="E(#gamma_{1})+E(#gamma_{2}) [MeV]", + TFile* fData=_file0, TFile* fMC=_file1) { // TFile* fData = _file0; // TFile* fMC = _file1; + + /* + #ifdef __CINT__ + gROOT->LoadMacro("PadmeLabels.C"); + gROOT->LoadMacro("PadmeUtils.C"); + #endif + + SetPadmeStyle(); + */ + + + std::cout<<" pointer to data file and mc file ... 
"<Get(hname.c_str()); TH1D* hMC = (TH1D*)fMC->Get(hname.c_str()); + TCanvas* cData = new TCanvas("data","data",500,500); hData->Draw(); @@ -40,22 +83,25 @@ void compare(std::string hname, double scalef=-1, TFile* fData=_file0, TFile* fM double ymax = ymaxMC; if (ymaxDT>ymax) ymax=ymaxDT; ymax = 1.3*ymax; - + TH1D* hFrame = hMC->Clone(); hFrame->Scale(0.); hFrame->SetMaximum(ymax); + hFrame->Draw(); - hsMC->SetLineColor(kBlack); + hsMC->SetLineColor(kRed); hsMC->Draw("SAME"); hData->SetMarkerStyle(20); - hData->SetMarkerSize(0.5); + //hData->SetMarkerSize(0.5); hData->SetMarkerColor(kBlack); hData->Draw("SAMEPE"); cout<<"Data/MC ratio = "<GetEntries()/hMC->GetEntries()<Draw(); // Define the Canvas - TCanvas *c = new TCanvas("c", "canvas", 800, 800); + TCanvas *c = new TCanvas("c", "My Final Plot", 50, 50, 600,600); // Upper plot will be in pad1 - TPad *pad1 = new TPad("pad1", "pad1", 0, 0.3, 1, 1.0); - pad1->SetBottomMargin(0); // Upper and lower plot are joined + TPad *pad1 = new TPad("pad1", "pad1", 0, 0.3, 1, 1.); + pad1->SetBottomMargin(0.02); // Upper and lower plot are joined + pad1->SetTopMargin(1); // Upper and lower plot are joined + //pad1->SetLogy(); //pad1->SetGridx(); // Vertical grid pad1->Draw(); // Draw the upper pad: pad1 pad1->cd(); // pad1 becomes the current pad @@ -91,14 +139,35 @@ void compare(std::string hname, double scalef=-1, TFile* fData=_file0, TFile* fM h1->Draw(); // Draw h1 h2->Draw("same"); // Draw h2 on top of h1 */ - hFrame->SetStats(0); + + Double_t ymin=0.; + ymin = hMC->GetYaxis()->GetXmin(); + if (pad1->GetLogy()) { + ymin = 1.; + ymax = 10*ymax; + } + + if (fabs(xmin+999)<0.0001 && fabs(xmax-999)<0.0001) { + xmin = hMC->GetXaxis()->GetXmin(); + xmax = hMC->GetXaxis()->GetXmax(); + } + // Double_t xmin=60.00; Double_t xmax=3500.; + TH1F *hFrame1 = pad1->DrawFrame(xmin,ymin,xmax,ymax); + std::cout<<"X range = "<GetXaxis()->SetLabelSize(0.); + hFrame1->SetStats(0); hsMC->SetStats(0); hData->SetStats(0); - hFrame->Draw(); + 
//hFrame1->Draw("SAME"); + //return; + hsMC->Draw("SAME"); hData->Draw("SAMEPE"); + // Do not draw the Y axis label on the upper plot and redraw a small // axis instead, in order to avoid the first label (0) to be clipped. //////////////hFrame->GetYaxis()->SetLabelSize(0.); @@ -109,9 +178,9 @@ void compare(std::string hname, double scalef=-1, TFile* fData=_file0, TFile* fM // lower plot will be in pad c->cd(); // Go back to the main canvas before defining pad2 - TPad *pad2 = new TPad("pad2", "pad2", 0, 0.05, 1, 0.3); - pad2->SetTopMargin(0); - pad2->SetBottomMargin(0.2); + TPad *pad2 = new TPad("pad2", "pad2", 0, 0.0, 1, 0.3); + pad2->SetTopMargin(0.02); + pad2->SetBottomMargin(0.3); //pad2->SetGridx(); // vertical grid pad2->SetGridy(); // horizontal grid pad2->Draw(); @@ -129,14 +198,18 @@ void compare(std::string hname, double scalef=-1, TFile* fData=_file0, TFile* fM h3->SetMarkerStyle(21); h3->Draw("ep"); // Draw the ratio plot */ + hratio->GetXaxis()->SetTitle("Y strip number"); pad2->SetLogy(); + TH1F *hFrameRat = pad2->DrawFrame(xmin,0.1,xmax,12.); + hFrameRat->GetXaxis()->SetTitle(xtitle.c_str()); + hFrameRat->Draw(); hratio->SetStats(0); - hratio->SetMaximum(10.); + hratio->SetMaximum(12.); hratio->SetMinimum(0.1); //hratio->SetMinimum(0.1); hratio->SetMarkerStyle(20); - hratio->SetMarkerSize(0.5); - hratio->Draw("ep"); + //hratio->SetMarkerSize(0.5); + hratio->Draw("epsame"); /* // h1 settings @@ -157,21 +230,60 @@ void compare(std::string hname, double scalef=-1, TFile* fData=_file0, TFile* fM //h3->SetTitle(""); // Remove the ratio title hratio->SetTitle(""); + // Y axis ratio plot settings - hratio->GetYaxis()->SetTitle("Data/MC"); - hratio->GetYaxis()->SetNdivisions(505); - hratio->GetYaxis()->SetTitleSize(20); - hratio->GetYaxis()->SetTitleFont(43); - hratio->GetYaxis()->SetTitleOffset(1.55); - hratio->GetYaxis()->SetLabelFont(43); // Absolute font size in pixel (precision 3) - hratio->GetYaxis()->SetLabelSize(15); + 
hFrameRat->GetYaxis()->SetTitle("Data/MC"); + hFrameRat->GetYaxis()->SetNdivisions(505); + hFrameRat->GetYaxis()->SetTitleSize(20); + hFrameRat->GetYaxis()->SetTitleFont(43); + hFrameRat->GetYaxis()->SetTitleOffset(1.55); + hFrameRat->GetYaxis()->SetLabelFont(43); // Absolute font size in pixel (precision 3) + hFrameRat->GetYaxis()->SetLabelSize(20); // X axis ratio plot settings - hratio->GetXaxis()->SetTitleSize(20); - hratio->GetXaxis()->SetTitleFont(43); - hratio->GetXaxis()->SetTitleOffset(4.); - hratio->GetXaxis()->SetLabelFont(43); // Absolute font size in pixel (precision 3) - hratio->GetXaxis()->SetLabelSize(15); + hFrameRat->GetXaxis()->SetTitleSize(20); + hFrameRat->GetXaxis()->SetTitleFont(43); + hFrameRat->GetXaxis()->SetTitleOffset(3.5); + hFrameRat->GetXaxis()->SetLabelFont(43); // Absolute font size in pixel (precision 3) + hFrameRat->GetXaxis()->SetLabelSize(20); + + pad1->cd(); + PADME_LABEL(0.2,0.8); myText( 0.33,0.8,1,"Internal"); + // ATLASLabel(0.2,0.8,"Work in progress");//,"Preliminary"); + myText( 0.2, 0.7 , 1, "nPOT=9.3#times10^{9}"); + // myText( 0.63, 0.7 , 1, "Diamond Target"); + // myText( 0.57, 0.8, 1, "Average beam Y profile"); + + //myMarkerText( 0.25, 0.5, 1, 20, "Data July 2019",1.3); + //myBoxText( 0.25, 0.45, 0.05, kBlack, "MC"); + TLegend* t = new TLegend(0.55,0.7,0.80,0.85); + t->AddEntry(hData,"data July 2019","ep"); + t->AddEntry(hsMC,"MC 20k e^{+}/bunch on target","l"); + t->SetTextAlign(12); + t->SetBorderSize(0); + t->SetFillStyle(0); + t->SetFillColor(0); + t->SetTextFont(43); + t->SetTextSize(15); + + t->Draw(); + + + c->SaveAs(("outpng/"+hname+".png").c_str()); + //c->SaveAs("myPlot.png"); + //c->SaveAs("myPlot.eps"); + //return; + + + delete pad1; + delete pad2; + delete cData; + delete cMC; + delete c; + delete c2; + delete t; + + std::cout<<"Bye, bye ................."<LoadMacro("PadmeLabels.C"); + gROOT->LoadMacro("PadmeUtils.C"); + gROOT->LoadMacro("PadmeStyle.C"); + #endif + + SetPadmeStyle(); + + + std::string 
xtitle; + double xmin=-999; + double xmax= 999; + + + xtitle = "Number of ECal hits"; + xmin = 0.; + xmax = 400.; + compare("SS2g_nhits", -1, xmin, xmax, xtitle, fData, fMC); + xtitle = "Cluster size"; + xmin = -999; + xmax = 999; + compare("SS2g_clSize", -1, xmin, xmax, xtitle, fData, fMC); + + //return; + + xtitle = "E(#gamma_{1}+E(#gamma_{2}) [MeV]"; + xmin = 0.; + xmax = 1200.; + compare("SS2gSumE_passDt", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gSumE_passDtDphi", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gSumE_passDtDphiCog", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gSumE_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gSumE_passDtDphiCogDsume", -1, 400, 600, xtitle, fData, fMC); + //compare("SS2gSumE_passDtDphiCogDsume", 0.38, 400., 600, xtitle, fData, fMC); + //compare("SS2gSumE_passDtDphiCogDsume", 0.2144, 400., 600, xtitle, fData, fMC); + + + + xtitle = "#Deltat [ns]"; + xmin = -15.; + xmax = 15.; + compare("SS2gDt_passDtDphi", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gDt_passDtDphiCog", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gDt_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gDt_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + + // return; + + xmin = -150.; + xmax = 150.; + xtitle = "#Deltat (hit-hit, ECal) [ns]"; + compare("TimeSpreadInECal", -1, xmin, xmax, xtitle, fData, fMC); + xmin = -15.; + xmax = 15.; + xtitle = "#Deltat (in ECal clusters) [ns]"; + compare("TimeSpreadInECalClus", -1, xmin, xmax, xtitle, fData, fMC); + + xmin = -100.; + xmax = 400.; + xtitle = "T(#gamma_{1}) (E>50MeV)[ns]"; + compare("SS2g_ClTime", -1, xmin, xmax, xtitle, fData, fMC); + xtitle = "T(#gamma_{1}) (#gamma#gamma candidate)[ns]"; + compare("SS2g_ClTime_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + + xmin=50.; + xmax=500.; + xtitle = "E(#gamma) [MeV]"; + compare("SS2gE_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + 
compare("SS2gE_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gE_passDtDphiCog", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gE_passDtDphi", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gE_passDt", -1, xmin, xmax, xtitle, fData, fMC); + + xtitle = "#Delta#phi [deg]"; + xmax=180.2; + compare("SS2gDphi_passDt", -1, xmin, xmax, xtitle, fData, fMC); + xmin=129.8; + compare("SS2gDphi_passDtDphi", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gDphi_passDtDphiCog", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gDphi_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gDphi_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + + xmin = -250.; + xmax = 250.; + xtitle = "X centroid [mm]"; + compare("SS2gXcog_passDt", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gXcog_passDtDphi", -1, xmin, xmax, xtitle, fData, fMC); + xmin = -40.; + xmax = 40.; + compare("SS2gXcog_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gXcog_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + + + xmin = -250.; + xmax = 250.; + xtitle = "Y centroid [mm]"; + compare("SS2gYcog_passDt", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gYcog_passDtDphi", -1, xmin, xmax, xtitle, fData, fMC); + xmin = -40.; + xmax = 40.; + compare("SS2gYcog_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gYcog_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + + + xmin = -350.; + xmax = 350.; + xtitle = "X [mm]"; + compare("SS2gX_passDt", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gX_passDtDphi", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gX_passDtDphiCog", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gX_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gX_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + + + xtitle = "Y [mm]"; + compare("SS2gY_passDt", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gY_passDtDphi", -1, xmin, xmax, 
xtitle, fData, fMC); + compare("SS2gY_passDtDphiCog", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gY_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gY_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + + xmin = -350.; + xmax = 350.; + xtitle = "X (energy weigthed) [mm]"; + compare("SS2gXEw_passDt", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gXEw_passDtDphi", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gXEw_passDtDphiCog", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gXEw_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gXEw_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + + + xtitle = "Y (energy weigthed) [mm]"; + compare("SS2gYEw_passDt", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gYEw_passDtDphi", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gYEw_passDtDphiCog", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gYEw_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gYEw_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + + +} + From cafe54618d07c6a37bbce02f15591f65a51bf3b7 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Fri, 15 Nov 2019 14:18:02 +0100 Subject: [PATCH 09/64] Added option to have magnetic field in the whole cross region --- PadmeMC/Chamber/include/ChamberGeometry.hh | 8 + PadmeMC/Chamber/include/ChamberStructure.hh | 9 +- PadmeMC/Chamber/src/ChamberGeometry.cc | 1 + PadmeMC/Chamber/src/ChamberStructure.cc | 18 ++- PadmeMC/include/DetectorConstruction.hh | 4 + PadmeMC/include/DetectorMessenger.hh | 2 + PadmeMC/src/DetectorConstruction.cc | 156 +++++++++++++++----- PadmeMC/src/DetectorMessenger.cc | 12 ++ PadmeMC/vis.mac.example | 3 + 9 files changed, 163 insertions(+), 50 deletions(-) diff --git a/PadmeMC/Chamber/include/ChamberGeometry.hh b/PadmeMC/Chamber/include/ChamberGeometry.hh index 894bfc40..22687c1c 100644 --- a/PadmeMC/Chamber/include/ChamberGeometry.hh +++ b/PadmeMC/Chamber/include/ChamberGeometry.hh 
@@ -44,6 +44,11 @@ public: G4double GetVCInnerSizeX() { return fVCInnerSizeX; } G4double GetVCInnerSizeY() { return fVCInnerSizeY; } G4double GetVCInnerFacePosZ() { return fVCInnerFacePosZ; } + G4double GetVCOuterFacePosZ() { return fVCOuterFacePosZ; } + + // Position of front face of target cross + // Used to create magnetic volume in the cross region + G4double GetCrossFrontFacePosZ() { return fCPZPosZ-0.5*fCPZLength; } // Properties of aluminum|carbon thin window and its flange @@ -90,6 +95,8 @@ public: G4double GetJunFlangeRIn() { return GetJunROut(); } G4double GetJunFlangeROut() { return fJunFlangeR; } G4double GetJunFlangeThick() { return fJunFlangeThick; } + G4double GetJunFrontFacePosZ() { return fJunFrontFacePosZ; } + G4double GetJunBackFacePosZ() { return fJunBackFacePosZ; } // Properties of junction pipe between cross and BTF pipes @@ -176,6 +183,7 @@ private: G4double fVCInnerSizeX; // Internal size along X of the rectangular section inside the magnet G4double fVCInnerSizeY; // Internal size along Y of the rectangular section inside the magnet G4double fVCInnerFacePosZ; // Position along Z of the internal face of the rectangular section inside the magnet + G4double fVCOuterFacePosZ; // Position along Z of the external face of the rectangular section inside the magnet // Aluminum|Carbon thin window and aluminum flange diff --git a/PadmeMC/Chamber/include/ChamberStructure.hh b/PadmeMC/Chamber/include/ChamberStructure.hh index b77c4d75..a86c4a16 100644 --- a/PadmeMC/Chamber/include/ChamberStructure.hh +++ b/PadmeMC/Chamber/include/ChamberStructure.hh @@ -22,7 +22,8 @@ public: ~ChamberStructure(); ChamberStructure(G4LogicalVolume*); - void SetMotherVolume(G4LogicalVolume* v) { fMotherVolume = v; } + void SetMotherVolume(G4LogicalVolume* v) { fMotherVolume = v; } + void SetCrossMotherVolume(G4LogicalVolume* v) { fCrossMotherVolume = v; } void CreateGeometry(); //G4double GetChamberMostExternalX(); @@ -43,6 +44,9 @@ public: void SetChamberVisible() { 
fChamberIsVisible = 1; } void SetChamberInvisible() { fChamberIsVisible = 0; } + // Define displacement of Cross region along Z due to position of magnetic volume + void SetCrossDisplacePosZ(G4double z) { fCrossDisplacePosZ = z; } + private: G4UnionSolid* CreateVCFacetGlobalSolid(); @@ -58,8 +62,11 @@ private: void CreateTPixPortholeCap(); G4LogicalVolume* fMotherVolume; + G4LogicalVolume* fCrossMotherVolume; G4LogicalVolume* fGlobalLogicalVolume; + G4double fCrossDisplacePosZ; // Displacement of Cross region along Z due to positioning inside magnetic volume + G4int fChamberExists; G4int fChamberIsVisible; diff --git a/PadmeMC/Chamber/src/ChamberGeometry.cc b/PadmeMC/Chamber/src/ChamberGeometry.cc index 8fd362bc..c9f89bdd 100644 --- a/PadmeMC/Chamber/src/ChamberGeometry.cc +++ b/PadmeMC/Chamber/src/ChamberGeometry.cc @@ -33,6 +33,7 @@ ChamberGeometry::ChamberGeometry() fVCInnerSizeX = 435.0*mm; // Internal size along X of the rectangular section inside the magnet fVCInnerSizeY = 205.0*mm; // Internal size along Y of the rectangular section inside the magnet fVCInnerFacePosZ = -490.0*mm; // Position along Z of the internal face of the rectangular section inside the magnet + fVCOuterFacePosZ = -500.001*mm; // Position along Z of the external face of the rectangular section inside the magnet // Parameters for the aluminum|carbon thin window diff --git a/PadmeMC/Chamber/src/ChamberStructure.cc b/PadmeMC/Chamber/src/ChamberStructure.cc index 9ed098dc..da6c9484 100644 --- a/PadmeMC/Chamber/src/ChamberStructure.cc +++ b/PadmeMC/Chamber/src/ChamberStructure.cc @@ -39,6 +39,8 @@ ChamberStructure::ChamberStructure(G4LogicalVolume* motherVolume) { fChamberExists = 1; // If =0 the physical structure of the chamber is not created fChamberIsVisible = 1; // If =0 all chamber structures are invisible (debug only) + + fCrossDisplacePosZ = 0.; // Displacement of Cross region due to positioning inside magnetic volume } ChamberStructure::~ChamberStructure() @@ -206,7 +208,7 @@ void 
ChamberStructure::CreateTargetPipes() G4SubtractionSolid* solidCP3 = new G4SubtractionSolid("VCCP3",solidCP2,solidCPXi,rotCPX,G4ThreeVector(0.,0.,0.)); G4LogicalVolume* logicalCP = new G4LogicalVolume(solidCP3,G4Material::GetMaterial("G4_STAINLESS-STEEL"),"VCCP",0,0,0); logicalCP->SetVisAttributes(steelVisAttr); - new G4PVPlacement(0,G4ThreeVector(0.,0.,cpzPosZ),logicalCP,"CrossPipeSteel",fMotherVolume,false,0,true); + new G4PVPlacement(0,G4ThreeVector(0.,0.,cpzPosZ)-G4ThreeVector(0.,0.,fCrossDisplacePosZ),logicalCP,"CrossPipeSteel",fCrossMotherVolume,false,0,true); // Create flanges for crossed pipe @@ -217,9 +219,9 @@ void ChamberStructure::CreateTargetPipes() G4LogicalVolume* logicalFlangeZ = new G4LogicalVolume(solidFlangeZ,G4Material::GetMaterial("G4_STAINLESS-STEEL"),"JunFlangeZ",0,0,0); logicalFlangeZ->SetVisAttributes(steelVisAttr); G4double flangez0PosZ = cpzPosZ-0.5*cpzLen+0.5*flangezThick; - new G4PVPlacement(0,G4ThreeVector(0.,0.,flangez0PosZ),logicalFlangeZ,"CPZFlange",fMotherVolume,false,0,true); + new G4PVPlacement(0,G4ThreeVector(0.,0.,flangez0PosZ)-G4ThreeVector(0.,0.,fCrossDisplacePosZ),logicalFlangeZ,"CPZFlange",fCrossMotherVolume,false,0,true); G4double flangez1PosZ = cpzPosZ+0.5*cpzLen-0.5*flangezThick; - new G4PVPlacement(0,G4ThreeVector(0.,0.,flangez1PosZ),logicalFlangeZ,"CPZFlange",fMotherVolume,false,1,true); + new G4PVPlacement(0,G4ThreeVector(0.,0.,flangez1PosZ)-G4ThreeVector(0.,0.,fCrossDisplacePosZ),logicalFlangeZ,"CPZFlange",fCrossMotherVolume,false,1,true); G4double flangexRIn = geo->GetCPXFlangeRIn(); G4double flangexROut = geo->GetCPXFlangeROut(); @@ -229,10 +231,10 @@ void ChamberStructure::CreateTargetPipes() logicalFlangeX->SetVisAttributes(steelVisAttr); G4double flangex0PosX = -0.5*cpxLen+0.5*flangexThick; G4double flangex0PosZ = cpzPosZ; - new G4PVPlacement(rotCPX,G4ThreeVector(flangex0PosX,0.,flangex0PosZ),logicalFlangeX,"CPXFlange",fMotherVolume,false,0,true); + new 
G4PVPlacement(rotCPX,G4ThreeVector(flangex0PosX,0.,flangex0PosZ)-G4ThreeVector(0.,0.,fCrossDisplacePosZ),logicalFlangeX,"CPXFlange",fCrossMotherVolume,false,0,true); G4double flangex1PosX = +0.5*cpxLen-0.5*flangexThick; G4double flangex1PosZ = cpzPosZ; - new G4PVPlacement(rotCPX,G4ThreeVector(flangex1PosX,0.,flangex1PosZ),logicalFlangeX,"CPXFlange",fMotherVolume,false,1,true); + new G4PVPlacement(rotCPX,G4ThreeVector(flangex1PosX,0.,flangex1PosZ)-G4ThreeVector(0.,0.,fCrossDisplacePosZ),logicalFlangeX,"CPXFlange",fCrossMotherVolume,false,1,true); } @@ -254,7 +256,7 @@ void ChamberStructure::CreateJunctionPipe() G4Tubs* solidJun = new G4Tubs("JunPipe",junRIn,junROut,0.5*junLen,0.*deg,360.*deg); G4LogicalVolume* logicalJun = new G4LogicalVolume(solidJun,G4Material::GetMaterial("G4_STAINLESS-STEEL"),"JunPipe",0,0,0); logicalJun->SetVisAttributes(steelVisAttr); - new G4PVPlacement(0,G4ThreeVector(0.,0.,junPosZ),logicalJun,"JunctionPipe",fMotherVolume,false,0,true); + new G4PVPlacement(0,G4ThreeVector(0.,0.,junPosZ)-G4ThreeVector(0.,0.,fCrossDisplacePosZ),logicalJun,"JunctionPipe",fCrossMotherVolume,false,0,true); printf("Junction pipe RIn %.1fmm Rout %.1fmm Zlen %.3fmm Zpos %.3fmm\n",junRIn/mm,junROut/mm,junLen/mm,junPosZ/mm); @@ -266,9 +268,9 @@ void ChamberStructure::CreateJunctionPipe() G4LogicalVolume* logicalFlange = new G4LogicalVolume(solidFlange,G4Material::GetMaterial("G4_STAINLESS-STEEL"),"JunFlange",0,0,0); logicalFlange->SetVisAttributes(steelVisAttr); G4double flange0PosZ = junPosZ-0.5*junLen+0.5*flangeThick; - new G4PVPlacement(0,G4ThreeVector(0.,0.,flange0PosZ),logicalFlange,"JunctionBFlange",fMotherVolume,false,0,true); + new G4PVPlacement(0,G4ThreeVector(0.,0.,flange0PosZ)-G4ThreeVector(0.,0.,fCrossDisplacePosZ),logicalFlange,"JunctionBFlange",fCrossMotherVolume,false,0,true); G4double flange1PosZ = junPosZ+0.5*junLen-0.5*flangeThick; - new G4PVPlacement(0,G4ThreeVector(0.,0.,flange1PosZ),logicalFlange,"JunctionFFlange",fMotherVolume,false,0,true); + 
new G4PVPlacement(0,G4ThreeVector(0.,0.,flange1PosZ)-G4ThreeVector(0.,0.,fCrossDisplacePosZ),logicalFlange,"JunctionFFlange",fCrossMotherVolume,false,0,true); } diff --git a/PadmeMC/include/DetectorConstruction.hh b/PadmeMC/include/DetectorConstruction.hh index aee8a6d7..7feee28c 100644 --- a/PadmeMC/include/DetectorConstruction.hh +++ b/PadmeMC/include/DetectorConstruction.hh @@ -65,6 +65,8 @@ public: void EnableMagneticField(); void DisableMagneticField(); + void SetCrossMagneticVolume(G4String); + void MagneticVolumeIsVisible(); void MagneticVolumeIsInvisible(); @@ -140,6 +142,8 @@ private: G4int fEnableMagneticField; G4int fMagneticVolumeIsVisible; + G4String fCrossMagneticVolume; + G4int fMagnetIsVisible; G4int fChamberIsVisible; G4int fBeamLineIsVisible; //M. Raggi 07/03/2019 diff --git a/PadmeMC/include/DetectorMessenger.hh b/PadmeMC/include/DetectorMessenger.hh index e558aaa7..96ba3d6f 100644 --- a/PadmeMC/include/DetectorMessenger.hh +++ b/PadmeMC/include/DetectorMessenger.hh @@ -38,6 +38,8 @@ private: G4UIcmdWithoutParameter* fEnableMagFieldCmd; G4UIcmdWithoutParameter* fDisableMagFieldCmd; + G4UIcmdWithAString* fCrossMagVolCmd; + G4UIcmdWithoutParameter* fMagVolVisibleCmd; G4UIcmdWithoutParameter* fMagVolInvisibleCmd; diff --git a/PadmeMC/src/DetectorConstruction.cc b/PadmeMC/src/DetectorConstruction.cc index 71e77fb4..0d74fd75 100644 --- a/PadmeMC/src/DetectorConstruction.cc +++ b/PadmeMC/src/DetectorConstruction.cc @@ -118,6 +118,7 @@ DetectorConstruction::DetectorConstruction() fEnableMagneticField = 1; fMagneticVolumeIsVisible = 0; + fCrossMagneticVolume = "internal"; fWorldIsFilledWithAir = 0; @@ -224,20 +225,6 @@ G4VPhysicalVolume* DetectorConstruction::Construct() fBeamLineStructure->CreateGeometry(); } - // Vacuum chamber structure - if (fEnableChamber) { - fChamberStructure->EnableChamber(); - } else { - fChamberStructure->DisableChamber(); - } - if (fChamberIsVisible) { - fChamberStructure->SetChamberVisible(); - } else { - 
fChamberStructure->SetChamberInvisible(); - } - fChamberStructure->SetMotherVolume(logicWorld); - fChamberStructure->CreateGeometry(); - // Create magnetic volume inside vacuum chamber G4double magVolMinX = -0.5*geoChamber->GetVCInnerSizeX()+1.*um; @@ -289,9 +276,13 @@ G4VPhysicalVolume* DetectorConstruction::Construct() //G4ThreeVector magVolPos = G4ThreeVector(0.,0.,0.); //new G4PVPlacement(magVolRot,magVolPos,logicMagneticVolumeVC,"MagneticVolumeVC",logicWorld,false,0,true); - // Compromise to save goat and cabbages - // Some fine adjustments to improve volume matching after rotation + // Compromise solution + + // Create the initial standard box sothat E/PVeto will not require displacement/rotation G4Box* solidMagVol1 = new G4Box("MagVol1",magVolHLX,magVolHLY,magVolHLZ); + + // Add the triangular shape in the forward region + // Some fine adjustments to improve volume matching after rotation std::vector magVolShape(4); magVolShape[0] = G4TwoVector(magVolMaxX-30.*um, magVolMaxZ-20.*um); magVolShape[1] = G4TwoVector(551.0*mm, magVolMaxZ-20.*um); @@ -301,22 +292,79 @@ G4VPhysicalVolume* DetectorConstruction::Construct() G4RotationMatrix* magVol2Rot = new G4RotationMatrix; magVol2Rot->rotateX(-90.*deg); G4ThreeVector magVol2Pos = G4ThreeVector(0.,0.,-magVolPosZ); - G4UnionSolid* solidMagneticVolume = new G4UnionSolid("MagneticVolume",solidMagVol1,solidMagVol2,magVol2Rot,magVol2Pos); + G4UnionSolid* solidMagVol3 = new G4UnionSolid("MagneticVolume",solidMagVol1,solidMagVol2,magVol2Rot,magVol2Pos); + + // Add cylinder at entrance hole up to end of vacuum chamber flange + G4double ehRIn = geoChamber->GetCPZRIn(); + G4double ehLen = geoChamber->GetVCInnerFacePosZ()-geoChamber->GetJunBackFacePosZ(); + G4Tubs* solidMagVol4 = new G4Tubs("CPZ",0.,ehRIn-1.*um,0.5*ehLen,0.*deg,360.*deg); + G4ThreeVector magVol4Pos = G4ThreeVector(0.,0.,0.5*(geoChamber->GetVCInnerFacePosZ()+geoChamber->GetJunBackFacePosZ())-magVolPosZ+1.*um); + G4UnionSolid* solidMagneticVolume = new 
G4UnionSolid("MagneticVolume",solidMagVol3,solidMagVol4,0,magVol4Pos); + G4LogicalVolume* logicMagneticVolumeVC = new G4LogicalVolume(solidMagneticVolume,G4Material::GetMaterial("Vacuum"),"MagneticVolumeVC",0,0,0); if (! fMagneticVolumeIsVisible) logicMagneticVolumeVC->SetVisAttributes(G4VisAttributes::Invisible); new G4PVPlacement(0,magVolPos,logicMagneticVolumeVC,"MagneticVolumeVC",logicWorld,false,0,true); - // Create magnetic volume inside beam entrance pipe + // Magnetic volume in the target cross region and its position + G4LogicalVolume* logicMagneticVolumeCross; + G4ThreeVector positionMagneticVolumeCross; + + if ( fCrossMagneticVolume == "internal" ) { + + // Create magnetic volume inside beam entrance pipe + + G4double cpzRIn = geoChamber->GetCPZRIn(); + //G4double cpzLen = 46.*cm; // Length is set to not include the instrumented section of the target + //G4double cpzLen = 49.*cm; // Length is set to not include the instrumented section of the target + G4double cpzLen = geoChamber->GetJunBackFacePosZ()-geoChamber->GetCPZPosZ()-5.*mm; // Length is set to not include the instrumented section of the target + G4Tubs* cpzSolid = new G4Tubs("CPZ",0.,cpzRIn-1.*um,0.5*cpzLen,0.*deg,360.*deg); + + //positionMagneticVolumeCross = G4ThreeVector(0.,0.,geoChamber->GetVCOuterFacePosZ()-0.5*cpzLen); + positionMagneticVolumeCross = G4ThreeVector(0.,0.,geoChamber->GetJunBackFacePosZ()-0.5*cpzLen-1.*um); + logicMagneticVolumeCross = + new G4LogicalVolume(cpzSolid,G4Material::GetMaterial("Vacuum"),"MagneticVolumeCross",0,0,0); + + } else { + + // Create a box with XY section matching that of the volume inside the vacuum chamber + // and position it to include the whole Cross region + + G4double cmvMinX = magVolMinX; + G4double cmvMinY = magVolMinY; + G4double cmvMinZ = geoChamber->GetCrossFrontFacePosZ(); + + G4double cmvMaxX = magVolMaxX; + G4double cmvMaxY = magVolMaxY; + G4double cmvMaxZ = geoChamber->GetVCOuterFacePosZ(); + + G4double cmvHLX = 0.5*(cmvMaxX-cmvMinX); + 
G4double cmvHLY = 0.5*(cmvMaxY-cmvMinY); + G4double cmvHLZ = 0.5*(cmvMaxZ-cmvMinZ); - G4double cpzRIn = geoChamber->GetCPZRIn(); - //G4double cpzLen = 46.*cm; // Length is set to not include the instrumented section of the target - G4double cpzLen = 49.*cm; // Length is set to not include the instrumented section of the target - G4Tubs* cpzSolid = new G4Tubs("CPZ",0.,cpzRIn-1.*um,0.5*cpzLen,0.*deg,360.*deg); - G4ThreeVector cpzPos(0.,0.,geoChamber->GetVCInnerFacePosZ()-0.5*cpzLen); - G4LogicalVolume* logicMagneticVolumeCP = - new G4LogicalVolume(cpzSolid,G4Material::GetMaterial("Vacuum"),"MagneticVolumeCP",0,0,0); - if (! fMagneticVolumeIsVisible) logicMagneticVolumeCP->SetVisAttributes(G4VisAttributes::Invisible); - new G4PVPlacement(0,cpzPos,logicMagneticVolumeCP,"MagneticVolumeCP",logicWorld,false,0,true); + G4Box* cmvSolid1 = new G4Box("CMV1",cmvHLX,cmvHLY,cmvHLZ); + + // Subtract cylinders corresponding to vacuum chamber flange and pipe + G4double flgR = geoChamber->GetJunFlangeROut(); + G4double flgL = geoChamber->GetJunFlangeThick()-2.*mm; // Apparently the flange thickness is less than 2cm + G4Tubs* cmvSolid2 = new G4Tubs("CMV2",0.,flgR,0.5*flgL,0.*deg,360.*deg); + G4double pipR = geoChamber->GetJunROut(); + G4double pipL = geoChamber->GetVCOuterFacePosZ()-geoChamber->GetJunBackFacePosZ()-flgL; + G4Tubs* cmvSolid3 = new G4Tubs("CMV3",0.,pipR,0.5*pipL,0.*deg,360.*deg); + + G4SubtractionSolid* cmvSolid4 = new G4SubtractionSolid("CMV4",cmvSolid1,cmvSolid2,0,G4ThreeVector(0.,0.,cmvHLZ-pipL-0.5*flgL)); + G4SubtractionSolid* cmvSolid5 = new G4SubtractionSolid("CMV5",cmvSolid4,cmvSolid3,0,G4ThreeVector(0.,0.,cmvHLZ-0.5*pipL)); + + G4double cmvPosX = 0.5*(cmvMaxX+cmvMinX); + G4double cmvPosY = 0.5*(cmvMaxY+cmvMinY); + G4double cmvPosZ = 0.5*(cmvMaxZ+cmvMinZ); + positionMagneticVolumeCross = G4ThreeVector(cmvPosX,cmvPosY,cmvPosZ); + logicMagneticVolumeCross = new G4LogicalVolume(cmvSolid5,G4Material::GetMaterial("Vacuum"),"MagneticVolumeCross",0,0,0); + + } + + if 
(! fMagneticVolumeIsVisible) + logicMagneticVolumeCross->SetVisAttributes(G4VisAttributes::Invisible); + new G4PVPlacement(0,positionMagneticVolumeCross,logicMagneticVolumeCross,"MagneticVolumeCross",logicWorld,false,0,true); // Add magnetic field to volumes if (fEnableMagneticField) { @@ -324,15 +372,36 @@ G4VPhysicalVolume* DetectorConstruction::Construct() if (fVerbose) printf("Enabling Magnetic Field with constant value %7.3f gauss\n",fMagneticFieldManager->GetMagneticField()->GetConstantMagneticFieldValue()/gauss); logicMagneticVolumeVC->SetFieldManager(fMagneticFieldManager->GetLocalFieldManager(),true); - logicMagneticVolumeCP->SetFieldManager(fMagneticFieldManager->GetLocalFieldManager(),true); + logicMagneticVolumeCross->SetFieldManager(fMagneticFieldManager->GetLocalFieldManager(),true); } - // Tungsten target dump - if (fEnableTungsten) { - fTungstenDetector->SetMotherVolume(logicMagneticVolumeCP); - fTungstenDetector->SetTungstenDisplacePosZ(cpzPos.z()); // Take into account magnetic volume displacement - fTungstenDetector->CreateGeometry(); + // Vacuum chamber structure + if (fEnableChamber) { + fChamberStructure->EnableChamber(); + } else { + fChamberStructure->DisableChamber(); + } + if (fChamberIsVisible) { + fChamberStructure->SetChamberVisible(); + } else { + fChamberStructure->SetChamberInvisible(); + } + fChamberStructure->SetMotherVolume(logicWorld); + if ( fCrossMagneticVolume == "internal" ) { + fChamberStructure->SetCrossMotherVolume(logicWorld); + fChamberStructure->SetCrossDisplacePosZ(0.); + } else { + fChamberStructure->SetCrossMotherVolume(logicMagneticVolumeCross); + fChamberStructure->SetCrossDisplacePosZ(positionMagneticVolumeCross.z()); } + fChamberStructure->CreateGeometry(); + + //// Tungsten target dump + //if (fEnableTungsten) { + // fTungstenDetector->SetMotherVolume(logicMagneticVolumeCP); + // fTungstenDetector->SetTungstenDisplacePosZ(cpzPos.z()); // Take into account magnetic volume displacement + // 
fTungstenDetector->CreateGeometry(); + //} // Concrete wall at large Z if (fEnableWall) { @@ -354,14 +423,13 @@ G4VPhysicalVolume* DetectorConstruction::Construct() // Target if (fEnableTarget) { - - // Should target be included in the magnetic volume, do not forget to take into account its displacement - //fTargetDetector->SetMotherVolume(logicMagneticVolumeCP); - //fTargetDetector->SetTargetDisplacePosZ(cpzPos.z()); - - fTargetDetector->SetMotherVolume(logicWorld); - fTargetDetector->SetTargetDisplacePosZ(0.); - + if ( fCrossMagneticVolume == "internal" ) { + fTargetDetector->SetMotherVolume(logicWorld); + fTargetDetector->SetTargetDisplacePosZ(0.); + } else { + fTargetDetector->SetMotherVolume(logicMagneticVolumeCross); + fTargetDetector->SetTargetDisplacePosZ(positionMagneticVolumeCross.z()); + } fTargetDetector->CreateGeometry(); } @@ -794,6 +862,12 @@ void DetectorConstruction::MagneticVolumeIsInvisible() fMagneticVolumeIsVisible = 0; } +void DetectorConstruction::SetCrossMagneticVolume(G4String str) +{ + if (fVerbose) printf("Magnetic volume in the Cross region is %s\n",str.data()); + fCrossMagneticVolume = str; +} + void DetectorConstruction::SetMagFieldValue(G4double v) { if (fVerbose) printf("Setting constant value of magnetic field to %f\n",v); diff --git a/PadmeMC/src/DetectorMessenger.cc b/PadmeMC/src/DetectorMessenger.cc index 83af7552..8c8173fe 100644 --- a/PadmeMC/src/DetectorMessenger.cc +++ b/PadmeMC/src/DetectorMessenger.cc @@ -45,6 +45,15 @@ DetectorMessenger::DetectorMessenger(DetectorConstruction* myDet) fDisableMagFieldCmd->SetGuidance("Disable magnetic field in simulation."); fDisableMagFieldCmd->AvailableForStates(G4State_PreInit,G4State_Idle); + fCrossMagVolCmd = new G4UIcmdWithAString("/Detector/CrossMagneticVolume",this); + fCrossMagVolCmd->SetGuidance("Define magnetic volume to be used in the Cross (Target) zone."); + fCrossMagVolCmd->SetGuidance("Possible choices are:"); + fCrossMagVolCmd->SetGuidance("internal magnetic field only 
inside the beam pipe (old style)."); + fCrossMagVolCmd->SetGuidance("external the full cross region is included in the magnetic field."); + fCrossMagVolCmd->SetParameterName("CMV",false); + fCrossMagVolCmd->SetCandidates("internal external"); + fCrossMagVolCmd->AvailableForStates(G4State_PreInit,G4State_Idle); + fMagVolVisibleCmd = new G4UIcmdWithoutParameter("/Detector/SetMagneticVolumeVisible",this); fMagVolVisibleCmd->SetGuidance("Make magnetic volume visible."); fMagVolVisibleCmd->AvailableForStates(G4State_PreInit,G4State_Idle); @@ -112,6 +121,7 @@ DetectorMessenger::~DetectorMessenger() delete fDisableStructCmd; delete fEnableMagFieldCmd; delete fDisableMagFieldCmd; + delete fCrossMagVolCmd; delete fMagVolVisibleCmd; delete fMagVolInvisibleCmd; delete fMagnetVisibleCmd; @@ -141,6 +151,8 @@ void DetectorMessenger::SetNewValue(G4UIcommand* command,G4String newValue) if( command == fEnableMagFieldCmd ) fDetector->EnableMagneticField(); if( command == fDisableMagFieldCmd ) fDetector->DisableMagneticField(); + if ( command == fCrossMagVolCmd ) fDetector->SetCrossMagneticVolume(newValue); + if( command == fMagVolVisibleCmd ) fDetector->MagneticVolumeIsVisible(); if( command == fMagVolInvisibleCmd ) fDetector->MagneticVolumeIsInvisible(); diff --git a/PadmeMC/vis.mac.example b/PadmeMC/vis.mac.example index 7bb195b6..ca7dd903 100644 --- a/PadmeMC/vis.mac.example +++ b/PadmeMC/vis.mac.example @@ -111,6 +111,9 @@ #/Detector/DisableMagneticField #/Detector/SetMagneticFieldValue 0.4542 tesla +#/Detector/CrossMagneticVolume internal +#/Detector/CrossMagneticVolume external + #/Detector/SetMagneticVolumeVisible #/Detector/SetMagneticVolumeInvisible From 1569a5b535c639f57feca4aac01dd9cff317c001 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Mon, 18 Nov 2019 10:34:16 +0100 Subject: [PATCH 10/64] Fixed flange thickness hole in cross magnetic field --- PadmeMC/src/DetectorConstruction.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/PadmeMC/src/DetectorConstruction.cc b/PadmeMC/src/DetectorConstruction.cc index 0d74fd75..3395304e 100644 --- a/PadmeMC/src/DetectorConstruction.cc +++ b/PadmeMC/src/DetectorConstruction.cc @@ -345,7 +345,9 @@ G4VPhysicalVolume* DetectorConstruction::Construct() // Subtract cylinders corresponding to vacuum chamber flange and pipe G4double flgR = geoChamber->GetJunFlangeROut(); - G4double flgL = geoChamber->GetJunFlangeThick()-2.*mm; // Apparently the flange thickness is less than 2cm + // Apparently the chamber flange thickness is thinner than the 2cm flange on the junction pipe + // To be checked on the real chamber + G4double flgL = geoChamber->GetJunFlangeThick()-3.*mm; G4Tubs* cmvSolid2 = new G4Tubs("CMV2",0.,flgR,0.5*flgL,0.*deg,360.*deg); G4double pipR = geoChamber->GetJunROut(); G4double pipL = geoChamber->GetVCOuterFacePosZ()-geoChamber->GetJunBackFacePosZ()-flgL; From 41374de635b3943ded1bbf8f19dcb3521a40b10d Mon Sep 17 00:00:00 2001 From: taruggi Date: Tue, 19 Nov 2019 15:50:17 +0100 Subject: [PATCH 11/64] First update --- PadmeReco/SAC/src/DigitizerChannelSAC.cc | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/PadmeReco/SAC/src/DigitizerChannelSAC.cc b/PadmeReco/SAC/src/DigitizerChannelSAC.cc index 877f5ad5..ace83132 100644 --- a/PadmeReco/SAC/src/DigitizerChannelSAC.cc +++ b/PadmeReco/SAC/src/DigitizerChannelSAC.cc @@ -170,7 +170,7 @@ Double_t DigitizerChannelSAC::CalcChaTime(std::vector &hitArray,USh for(UShort_t s=0;sSetContent(AbsSamRec); char name[50]; @@ -191,6 +191,7 @@ Double_t DigitizerChannelSAC::CalcChaTime(std::vector &hitArray,USh // std::cout<15.) std::cout<fAmpThresholdHigh && VMax>-2*VMin){ // zero suppression on Voltage normalize to energy. + //if(VMax>fAmpThresholdHigh){//CTVMax TSpectrum *s = &SpectrumProcessor;//new TSpectrum(npeaks); Double_t peak_thr = fAmpThresholdLow/VMax; //minimum peak height allowed. 
Int_t nfound = s->Search(H1,2,"",peak_thr); //corrected for 2.5GHz cannot be less then 0.05 @@ -226,11 +227,13 @@ Double_t DigitizerChannelSAC::CalcChaTime(std::vector &hitArray,USh fCalibEnergy = fCharge/pCMeV*fCalibCh[ElCh]; //calibrated energy of the hit TRecoVHit *Hit = new TRecoVHit(); - if(yp>-3*VMin){ + if(yp>-2*VMin){ + //if(yp>-1*VMin){//CTVMin + //if(1){ Hit->SetTime(fTime); - // Hit->SetEnergy(fCharge); // need to add hit status + //Hit->SetEnergy(fCharge); // need to add hit status Hit->SetEnergy(fEnergy); //here, if you need, you can change the variable you need (at this point you can only use one) - //Hit->SetEnergy(yp); // need to add hit status to avoid saturations + //Hit->SetEnergy(VMax); // need to add hit status to avoid saturations hitArray.push_back(Hit); }else{ // fileOut->cd(); From 1e94c58139eae292819e05058e343d0cfbe02944 Mon Sep 17 00:00:00 2001 From: taruggi Date: Tue, 19 Nov 2019 16:09:11 +0100 Subject: [PATCH 12/64] SAC mapping fixed in DigitizerChannelSAC.cc and SACReconstruction.cc --- PadmeReco/SAC/src/SACReconstruction.cc | 10 +++++----- PadmeReco/config/PadmeReconstruction.cfg | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/PadmeReco/SAC/src/SACReconstruction.cc b/PadmeReco/SAC/src/SACReconstruction.cc index f8e8fb2b..3e722f20 100644 --- a/PadmeReco/SAC/src/SACReconstruction.cc +++ b/PadmeReco/SAC/src/SACReconstruction.cc @@ -39,11 +39,11 @@ Int_t SACReconstruction::FindSeed(Int_t nele, Int_t * Used, Double_t* Ene) { Int_t SACReconstruction::IsSeedNeig(Int_t seedID, Int_t cellID) { //uses cellID to find neig cells wrt seed of the cluster Int_t IsNeig=-1; - Int_t SeedRow=seedID/10; - Int_t SeedCol=seedID%10; + Int_t SeedRow=seedID%10; + Int_t SeedCol=seedID/10; - Int_t CellRow=cellID/10; - Int_t CellCol=cellID%10; + Int_t CellRow=cellID%10; + Int_t CellCol=cellID/10; //excludes the seed cell if( abs(SeedRow-CellRow)<=1 && abs(SeedCol-CellCol)<=1) IsNeig=1; // std::cout<<"seedID "<GetTime(); 
int ich = Hits[iHit1]->GetChannelId(); - int ElCh = ich/10*5 +ich%5; + int ElCh = ich/10 +ich%10*5; Energy += Hits[iHit1]->GetEnergy(); //SAC total energy ECh[ElCh]+= Hits[iHit1]->GetEnergy(); //SAC total energy GetHisto("SACOccupancy") -> Fill(4.5-(ich/10),0.5+ich%10); /* inserted 4.5- to swap PG */ diff --git a/PadmeReco/config/PadmeReconstruction.cfg b/PadmeReco/config/PadmeReconstruction.cfg index e941ac1d..fb8368ca 100644 --- a/PadmeReco/config/PadmeReconstruction.cfg +++ b/PadmeReco/config/PadmeReconstruction.cfg @@ -1,20 +1,20 @@ [RECOALGORITHMS] -ECal 1 +ECal 0 SAC 1 -PVeto 1 -EVeto 1 -HEPVeto 1 +PVeto 0 +EVeto 0 +HEPVeto 0 TPix 0 -Target 1 +Target 0 [RECOOutput] -ECal 1 +ECal 0 SAC 1 -PVeto 1 -EVeto 1 -HEPVeto 1 +PVeto 0 +EVeto 0 +HEPVeto 0 TPix 0 -Target 1 +Target 0 [RUNNINGMODE] Pedestals 0 From 4ecbf88d558835d5f0971d9a5ed8c2b3803a14ad Mon Sep 17 00:00:00 2001 From: taruggi Date: Tue, 19 Nov 2019 17:04:18 +0100 Subject: [PATCH 13/64] PadmeReconstruction.cfg back to develop version --- PadmeReco/config/PadmeReconstruction.cfg | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/PadmeReco/config/PadmeReconstruction.cfg b/PadmeReco/config/PadmeReconstruction.cfg index 8538bbc5..69643024 100644 --- a/PadmeReco/config/PadmeReconstruction.cfg +++ b/PadmeReco/config/PadmeReconstruction.cfg @@ -1,20 +1,20 @@ [RECOALGORITHMS] -ECal 0 +ECal 1 SAC 1 -PVeto 0 -EVeto 0 -HEPVeto 0 +PVeto 1 +EVeto 1 +HEPVeto 1 TPix 0 -Target 0 +Target 1 [RECOOutput] -ECal 0 +ECal 1 SAC 1 -PVeto 0 -EVeto 0 -HEPVeto 0 +PVeto 1 +EVeto 1 +HEPVeto 1 TPix 0 -Target 0 +Target 1 [RUNNINGMODE] Pedestals 0 From bec980814a34495dd4ac4e057b46403cb4537407 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Fri, 22 Nov 2019 09:48:02 +0100 Subject: [PATCH 14/64] PadmeDB: Updated DB schema and DB population script --- PadmeDB/PadmeDAQ_data.txt | 13 ++ PadmeDB/PadmeDAQ_schema.sql | 370 ++++++++++++------------------------ PadmeDB/PadmeDAQ_setup.py | 58 +++++- 3 
files changed, 194 insertions(+), 247 deletions(-) diff --git a/PadmeDB/PadmeDAQ_data.txt b/PadmeDB/PadmeDAQ_data.txt index 8aea544c..db773fa2 100644 --- a/PadmeDB/PadmeDAQ_data.txt +++ b/PadmeDB/PadmeDAQ_data.txt @@ -96,3 +96,16 @@ run_type 3 CALIBRATION Calibration run run_type 4 RANDOM Random triggers run run_type 5 OTHER Run of a type which was not forseen run_type 6 FAKE Run will use PadmeDAQ in FAKE mode (experts only!) + +# List of process types +proc_type 0 ADCDAQ PadmeDAQ Configure an ADC board, collect its data, format them, and send them to ZEROSUP +proc_type 1 ZEROSUP PadmeDAQ Receive data from ADCDAQ, apply zero suppression, and send them to MERGER +proc_type 2 TRIGGER PadmeTrig Configure the Trigger board, collect its data, format them, and send them to MERGER +proc_type 3 MERGER PadmeMerger Receive data from TRIGGER and ZEROSUP, synchronize and merge them into a full event, and distribute it to LEVEL1 +proc_type 4 LEVEL1 PadmeLevel1 Receive full events from MERGER, apply level1 selection, save them to RAWDATA file in ROOT format + +# List of file types +file_type 0 RAWDATA PadmeLevel1 Full raw events in ROOT format. +file_type 1 DAQDATA PadmeDAQ Data from a single ADC board in DAQ Event format. +file_type 2 TRIGDATA PadmeTrig Data from the Trigger board in Trigger Event format. +file_type 3 EVTDATA PadmeMerger Data from the Merger in Full Event format. 
diff --git a/PadmeDB/PadmeDAQ_schema.sql b/PadmeDB/PadmeDAQ_schema.sql index 24558e90..c2344c8b 100644 --- a/PadmeDB/PadmeDAQ_schema.sql +++ b/PadmeDB/PadmeDAQ_schema.sql @@ -1,5 +1,5 @@ -- MySQL Script generated by MySQL Workbench --- 07/27/18 14:25:44 +-- 11/22/19 09:39:01 -- Model: New Model Version: 1.0 -- MySQL Workbench Forward Engineering @@ -23,9 +23,9 @@ USE `PadmeDAQ` ; -- ----------------------------------------------------- CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`run_type` ( `id` INT NOT NULL, - `short_name` VARCHAR(255) NOT NULL, + `type` VARCHAR(255) NOT NULL, `description` TEXT NULL, - UNIQUE INDEX `short_name_UNIQUE` (`short_name` ASC), + UNIQUE INDEX `short_name_UNIQUE` (`type` ASC), PRIMARY KEY (`id`)) ENGINE = InnoDB; @@ -52,82 +52,6 @@ CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`run` ( ENGINE = InnoDB; --- ----------------------------------------------------- --- Table `PadmeDAQ`.`node` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`node` ( - `id` INT NOT NULL, - `name` VARCHAR(255) NOT NULL, - `mac_addr_lnf` CHAR(17) NULL, - `ip_addr_lnf` VARCHAR(15) NULL, - `mac_addr_daq` CHAR(17) NULL, - `ip_addr_daq` VARCHAR(15) NULL, - `mac_addr_dcs` CHAR(17) NULL, - `ip_addr_dcs` VARCHAR(15) NULL, - `mac_addr_ipmi` CHAR(17) NULL, - `ip_addr_ipmi` VARCHAR(15) NULL, - PRIMARY KEY (`id`), - UNIQUE INDEX `name_UNIQUE` (`name` ASC), - UNIQUE INDEX `mac_addr_lnf_UNIQUE` (`mac_addr_lnf` ASC), - UNIQUE INDEX `ip_addr_lnf_UNIQUE` (`ip_addr_lnf` ASC), - UNIQUE INDEX `mac_addr_daq_UNIQUE` (`mac_addr_daq` ASC), - UNIQUE INDEX `ip_addr_daq_UNIQUE` (`ip_addr_daq` ASC), - UNIQUE INDEX `mac_addr_dcs_UNIQUE` (`mac_addr_dcs` ASC), - UNIQUE INDEX `ip_addr_dcs_UNIQUE` (`ip_addr_dcs` ASC), - UNIQUE INDEX `mac_addr_ipmi_UNIQUE` (`mac_addr_ipmi` ASC), - UNIQUE INDEX `ip_addr_ipmi_UNIQUE` (`ip_addr_ipmi` ASC)) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDAQ`.`optical_link` --- 
----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`optical_link` ( - `id` INT NOT NULL AUTO_INCREMENT, - `node_id` INT NOT NULL, - `controller_id` INT UNSIGNED NOT NULL, - `channel_id` INT UNSIGNED NOT NULL, - `slot_id` INT UNSIGNED NOT NULL, - PRIMARY KEY (`id`), - INDEX `fk_optical_link_node1_idx` (`node_id` ASC), - CONSTRAINT `fk_optical_link_node1` - FOREIGN KEY (`node_id`) - REFERENCES `PadmeDAQ`.`node` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDAQ`.`daq_process` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`daq_process` ( - `id` INT NOT NULL AUTO_INCREMENT, - `mode` VARCHAR(16) NOT NULL, - `run_number` INT NOT NULL, - `optical_link_id` INT NOT NULL, - `status` INT NOT NULL, - `time_start` DATETIME NULL, - `time_stop` DATETIME NULL, - `n_daq_files` INT UNSIGNED NULL, - `total_events` INT UNSIGNED NULL, - `total_size` BIGINT UNSIGNED NULL, - PRIMARY KEY (`id`), - INDEX `fk_daq_process_run1_idx` (`run_number` ASC), - INDEX `fk_daq_process_optical_link1_idx` (`optical_link_id` ASC), - CONSTRAINT `fk_daq_process_run1` - FOREIGN KEY (`run_number`) - REFERENCES `PadmeDAQ`.`run` (`number`) - ON DELETE NO ACTION - ON UPDATE NO ACTION, - CONSTRAINT `fk_daq_process_optical_link1` - FOREIGN KEY (`optical_link_id`) - REFERENCES `PadmeDAQ`.`optical_link` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - -- ----------------------------------------------------- -- Table `PadmeDAQ`.`board_type` -- ----------------------------------------------------- @@ -172,30 +96,6 @@ CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`config_para_name` ( ENGINE = InnoDB; --- ----------------------------------------------------- --- Table `PadmeDAQ`.`daq_proc_config_para` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS 
`PadmeDAQ`.`daq_proc_config_para` ( - `id` INT NOT NULL AUTO_INCREMENT, - `daq_process_id` INT NOT NULL, - `config_para_name_id` INT NOT NULL, - `value` VARCHAR(1024) NOT NULL, - PRIMARY KEY (`id`), - INDEX `fk_daq_proc_config_para_config_para_name1_idx` (`config_para_name_id` ASC), - INDEX `fk_daq_proc_config_para_process1_idx` (`daq_process_id` ASC), - CONSTRAINT `fk_daq_proc_config_para_config_para_name1` - FOREIGN KEY (`config_para_name_id`) - REFERENCES `PadmeDAQ`.`config_para_name` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION, - CONSTRAINT `fk_daq_proc_config_para_process1` - FOREIGN KEY (`daq_process_id`) - REFERENCES `PadmeDAQ`.`daq_process` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - -- ----------------------------------------------------- -- Table `PadmeDAQ`.`run_config_para` -- ----------------------------------------------------- @@ -221,78 +121,44 @@ ENGINE = InnoDB; -- ----------------------------------------------------- --- Table `PadmeDAQ`.`l_board_optical_link` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`l_board_optical_link` ( - `id` INT NOT NULL AUTO_INCREMENT, - `board_id` INT NOT NULL, - `optical_link_id` INT NOT NULL, - `time_start` DATETIME NULL, - `time_stop` DATETIME NULL, - INDEX `fk_l_board_optical_link_board1_idx` (`board_id` ASC), - INDEX `fk_l_board_optical_link_optical_link1_idx` (`optical_link_id` ASC), - PRIMARY KEY (`id`), - CONSTRAINT `fk_l_board_optical_link_board1` - FOREIGN KEY (`board_id`) - REFERENCES `PadmeDAQ`.`board` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION, - CONSTRAINT `fk_l_board_optical_link_optical_link1` - FOREIGN KEY (`optical_link_id`) - REFERENCES `PadmeDAQ`.`optical_link` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDAQ`.`daq_file` +-- Table `PadmeDAQ`.`node` -- 
----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`daq_file` ( - `id` INT NOT NULL AUTO_INCREMENT, - `daq_process_id` INT NOT NULL, +CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`node` ( + `id` INT NOT NULL, `name` VARCHAR(255) NOT NULL, - `version` INT NULL, - `part` INT UNSIGNED NULL, - `time_open` DATETIME NULL, - `time_close` DATETIME NULL, - `n_events` INT UNSIGNED NULL, - `size` BIGINT UNSIGNED NULL, + `mac_addr_lnf` CHAR(17) NULL, + `ip_addr_lnf` VARCHAR(15) NULL, + `mac_addr_daq` CHAR(17) NULL, + `ip_addr_daq` VARCHAR(15) NULL, + `mac_addr_dcs` CHAR(17) NULL, + `ip_addr_dcs` VARCHAR(15) NULL, + `mac_addr_ipmi` CHAR(17) NULL, + `ip_addr_ipmi` VARCHAR(15) NULL, PRIMARY KEY (`id`), - UNIQUE INDEX `daq_file_name_UNIQUE` (`name` ASC), - INDEX `fk_daq_file_daq_process1_idx` (`daq_process_id` ASC), - CONSTRAINT `fk_daq_file_daq_process1` - FOREIGN KEY (`daq_process_id`) - REFERENCES `PadmeDAQ`.`daq_process` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) + UNIQUE INDEX `name_UNIQUE` (`name` ASC), + UNIQUE INDEX `mac_addr_lnf_UNIQUE` (`mac_addr_lnf` ASC), + UNIQUE INDEX `ip_addr_lnf_UNIQUE` (`ip_addr_lnf` ASC), + UNIQUE INDEX `mac_addr_daq_UNIQUE` (`mac_addr_daq` ASC), + UNIQUE INDEX `ip_addr_daq_UNIQUE` (`ip_addr_daq` ASC), + UNIQUE INDEX `mac_addr_dcs_UNIQUE` (`mac_addr_dcs` ASC), + UNIQUE INDEX `ip_addr_dcs_UNIQUE` (`ip_addr_dcs` ASC), + UNIQUE INDEX `mac_addr_ipmi_UNIQUE` (`mac_addr_ipmi` ASC), + UNIQUE INDEX `ip_addr_ipmi_UNIQUE` (`ip_addr_ipmi` ASC)) ENGINE = InnoDB; -- ----------------------------------------------------- --- Table `PadmeDAQ`.`level1_process` +-- Table `PadmeDAQ`.`optical_link` -- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`level1_process` ( +CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`optical_link` ( `id` INT NOT NULL AUTO_INCREMENT, - `run_number` INT NOT NULL, `node_id` INT NOT NULL, - `number` INT NULL, - `status` INT NOT NULL, - `time_start` DATETIME 
NULL, - `time_stop` DATETIME NULL, - `n_raw_files` INT UNSIGNED NULL, - `total_events` INT UNSIGNED NULL, - `total_size` BIGINT UNSIGNED NULL, + `controller_id` INT UNSIGNED NOT NULL, + `channel_id` INT UNSIGNED NOT NULL, + `slot_id` INT UNSIGNED NOT NULL, PRIMARY KEY (`id`), - INDEX `fk_lvl1_process_run1_idx` (`run_number` ASC), - INDEX `fk_lvl1_process_node1_idx` (`node_id` ASC), - UNIQUE INDEX `lvl1_run_and_number` (`run_number` ASC, `number` ASC), - CONSTRAINT `fk_lvl1_process_run1` - FOREIGN KEY (`run_number`) - REFERENCES `PadmeDAQ`.`run` (`number`) - ON DELETE NO ACTION - ON UPDATE NO ACTION, - CONSTRAINT `fk_lvl1_process_node1` + INDEX `fk_optical_link_node1_idx` (`node_id` ASC), + CONSTRAINT `fk_optical_link_node1` FOREIGN KEY (`node_id`) REFERENCES `PadmeDAQ`.`node` (`id`) ON DELETE NO ACTION @@ -301,48 +167,25 @@ ENGINE = InnoDB; -- ----------------------------------------------------- --- Table `PadmeDAQ`.`level1_proc_config_para` +-- Table `PadmeDAQ`.`l_board_optical_link` -- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`level1_proc_config_para` ( +CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`l_board_optical_link` ( `id` INT NOT NULL AUTO_INCREMENT, - `level1_process_id` INT NOT NULL, - `config_para_name_id` INT NOT NULL, - `value` VARCHAR(1024) NOT NULL, + `board_id` INT NOT NULL, + `optical_link_id` INT NOT NULL, + `time_start` DATETIME NULL, + `time_stop` DATETIME NULL, + INDEX `fk_l_board_optical_link_board1_idx` (`board_id` ASC), + INDEX `fk_l_board_optical_link_optical_link1_idx` (`optical_link_id` ASC), PRIMARY KEY (`id`), - INDEX `fk_lvl1_proc_config_para_config_para_name1_idx` (`config_para_name_id` ASC), - INDEX `fk_lvl1_proc_config_para_lvl1_process1_idx` (`level1_process_id` ASC), - CONSTRAINT `fk_lvl1_proc_config_para_config_para_name1` - FOREIGN KEY (`config_para_name_id`) - REFERENCES `PadmeDAQ`.`config_para_name` (`id`) + CONSTRAINT `fk_l_board_optical_link_board1` + FOREIGN KEY (`board_id`) + 
REFERENCES `PadmeDAQ`.`board` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION, - CONSTRAINT `fk_lvl1_proc_config_para_lvl1_process1` - FOREIGN KEY (`level1_process_id`) - REFERENCES `PadmeDAQ`.`level1_process` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDAQ`.`raw_file` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`raw_file` ( - `id` INT NOT NULL AUTO_INCREMENT, - `lvl1_process_id` INT NOT NULL, - `name` VARCHAR(255) NOT NULL, - `part` INT UNSIGNED NULL, - `status` INT NULL, - `time_open` DATETIME NULL, - `time_close` DATETIME NULL, - `n_events` INT UNSIGNED NULL, - `size` BIGINT UNSIGNED NULL, - PRIMARY KEY (`id`), - UNIQUE INDEX `raw_file_name_UNIQUE` (`name` ASC), - INDEX `fk_raw_file_lvl1_process1_idx` (`lvl1_process_id` ASC), - CONSTRAINT `fk_raw_file_lvl1_process1` - FOREIGN KEY (`lvl1_process_id`) - REFERENCES `PadmeDAQ`.`level1_process` (`id`) + CONSTRAINT `fk_l_board_optical_link_optical_link1` + FOREIGN KEY (`optical_link_id`) + REFERENCES `PadmeDAQ`.`optical_link` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB; @@ -369,105 +212,142 @@ ENGINE = InnoDB; -- ----------------------------------------------------- --- Table `PadmeDAQ`.`merger_process` +-- Table `PadmeDAQ`.`process_type` -- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`merger_process` ( +CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`process_type` ( + `id` INT NOT NULL, + `type` VARCHAR(255) NOT NULL, + `executable` VARCHAR(255) NULL, + `description` TEXT NULL, + UNIQUE INDEX `short_name_UNIQUE` (`type` ASC), + PRIMARY KEY (`id`)) +ENGINE = InnoDB; + + +-- ----------------------------------------------------- +-- Table `PadmeDAQ`.`process` +-- ----------------------------------------------------- +CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`process` ( `id` INT NOT NULL AUTO_INCREMENT, 
`run_number` INT NOT NULL, + `process_type_id` INT NOT NULL, `node_id` INT NOT NULL, `status` INT NOT NULL, `time_start` DATETIME NULL, `time_stop` DATETIME NULL, - `n_raw_files` INT UNSIGNED NULL, + `n_files` INT UNSIGNED NULL, `total_events` INT UNSIGNED NULL, `total_size` BIGINT UNSIGNED NULL, PRIMARY KEY (`id`), - INDEX `fk_lvl1_process_run1_idx` (`run_number` ASC), - INDEX `fk_lvl1_process_node1_idx` (`node_id` ASC), - CONSTRAINT `fk_lvl1_process_run1` + INDEX `fk_daq_process_run1_idx` (`run_number` ASC), + INDEX `fk_process_node1_idx` (`node_id` ASC), + INDEX `fk_process_process_type1_idx` (`process_type_id` ASC), + CONSTRAINT `fk_daq_process_run1` FOREIGN KEY (`run_number`) REFERENCES `PadmeDAQ`.`run` (`number`) ON DELETE NO ACTION ON UPDATE NO ACTION, - CONSTRAINT `fk_lvl1_process_node1` + CONSTRAINT `fk_process_node1` FOREIGN KEY (`node_id`) REFERENCES `PadmeDAQ`.`node` (`id`) ON DELETE NO ACTION + ON UPDATE NO ACTION, + CONSTRAINT `fk_process_process_type1` + FOREIGN KEY (`process_type_id`) + REFERENCES `PadmeDAQ`.`process_type` (`id`) + ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB; -- ----------------------------------------------------- --- Table `PadmeDAQ`.`trigger_process` +-- Table `PadmeDAQ`.`daq_link` -- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`trigger_process` ( +CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`daq_link` ( `id` INT NOT NULL AUTO_INCREMENT, - `run_number` INT NOT NULL, - `node_id` INT NOT NULL, - `status` INT NOT NULL, - `time_start` DATETIME NULL, - `time_stop` DATETIME NULL, - `total_events` INT UNSIGNED NULL, - `total_size` BIGINT UNSIGNED NULL, + `process_id` INT NOT NULL, + `optical_link_id` INT NOT NULL, PRIMARY KEY (`id`), - INDEX `fk_lvl1_process_run1_idx` (`run_number` ASC), - INDEX `fk_lvl1_process_node1_idx` (`node_id` ASC), - CONSTRAINT `fk_lvl1_process_run1` - FOREIGN KEY (`run_number`) - REFERENCES `PadmeDAQ`.`run` (`number`) + INDEX 
`fk_daq_process_optical_link1_idx` (`optical_link_id` ASC), + INDEX `fk_daq_link_process1_idx` (`process_id` ASC), + CONSTRAINT `fk_daq_process_optical_link1` + FOREIGN KEY (`optical_link_id`) + REFERENCES `PadmeDAQ`.`optical_link` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION, - CONSTRAINT `fk_lvl1_process_node1` - FOREIGN KEY (`node_id`) - REFERENCES `PadmeDAQ`.`node` (`id`) + CONSTRAINT `fk_daq_link_process1` + FOREIGN KEY (`process_id`) + REFERENCES `PadmeDAQ`.`process` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB; -- ----------------------------------------------------- --- Table `PadmeDAQ`.`trigger_proc_config_para` +-- Table `PadmeDAQ`.`file_type` +-- ----------------------------------------------------- +CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`file_type` ( + `id` INT NOT NULL, + `type` VARCHAR(255) NOT NULL, + `producer` VARCHAR(255) NULL, + `description` TEXT NULL, + UNIQUE INDEX `short_name_UNIQUE` (`type` ASC), + PRIMARY KEY (`id`)) +ENGINE = InnoDB; + + +-- ----------------------------------------------------- +-- Table `PadmeDAQ`.`file` -- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`trigger_proc_config_para` ( +CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`file` ( `id` INT NOT NULL AUTO_INCREMENT, - `trigger_process_id` INT NOT NULL, - `config_para_name_id` INT NOT NULL, - `value` VARCHAR(1024) NOT NULL, + `name` VARCHAR(255) NOT NULL, + `file_type_id` INT NOT NULL, + `version` INT NULL, + `process_id` INT NOT NULL, + `part` INT UNSIGNED NULL, + `status` INT NULL, + `time_open` DATETIME NULL, + `time_close` DATETIME NULL, + `n_events` INT UNSIGNED NULL, + `size` BIGINT UNSIGNED NULL, + `adler32` CHAR(8) NULL, PRIMARY KEY (`id`), - INDEX `fk_daq_proc_config_para_config_para_name1_idx` (`config_para_name_id` ASC), - INDEX `fk_trigger_proc_config_para_trigger_process1_idx` (`trigger_process_id` ASC), - CONSTRAINT `fk_daq_proc_config_para_config_para_name1` - FOREIGN KEY (`config_para_name_id`) 
- REFERENCES `PadmeDAQ`.`config_para_name` (`id`) + UNIQUE INDEX `raw_file_name_UNIQUE` (`name` ASC), + INDEX `fk_file_process1_idx` (`process_id` ASC), + INDEX `fk_file_file_type1_idx` (`file_type_id` ASC), + CONSTRAINT `fk_file_process1` + FOREIGN KEY (`process_id`) + REFERENCES `PadmeDAQ`.`process` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION, - CONSTRAINT `fk_trigger_proc_config_para_trigger_process1` - FOREIGN KEY (`trigger_process_id`) - REFERENCES `PadmeDAQ`.`trigger_process` (`id`) + CONSTRAINT `fk_file_file_type1` + FOREIGN KEY (`file_type_id`) + REFERENCES `PadmeDAQ`.`file_type` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB; -- ----------------------------------------------------- --- Table `PadmeDAQ`.`merger_proc_config_para` +-- Table `PadmeDAQ`.`proc_config_para` -- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`merger_proc_config_para` ( +CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`proc_config_para` ( `id` INT NOT NULL AUTO_INCREMENT, - `merger_process_id` INT NOT NULL, + `process_id` INT NOT NULL, `config_para_name_id` INT NOT NULL, `value` VARCHAR(1024) NOT NULL, PRIMARY KEY (`id`), INDEX `fk_daq_proc_config_para_config_para_name1_idx` (`config_para_name_id` ASC), - INDEX `fk_merger_proc_config_para_merger_process1_idx` (`merger_process_id` ASC), + INDEX `fk_proc_config_para_process1_idx` (`process_id` ASC), CONSTRAINT `fk_daq_proc_config_para_config_para_name1` FOREIGN KEY (`config_para_name_id`) REFERENCES `PadmeDAQ`.`config_para_name` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION, - CONSTRAINT `fk_merger_proc_config_para_merger_process1` - FOREIGN KEY (`merger_process_id`) - REFERENCES `PadmeDAQ`.`merger_process` (`id`) + CONSTRAINT `fk_proc_config_para_process1` + FOREIGN KEY (`process_id`) + REFERENCES `PadmeDAQ`.`process` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION) ENGINE = InnoDB; diff --git a/PadmeDB/PadmeDAQ_setup.py b/PadmeDB/PadmeDAQ_setup.py index ec319ca9..67d78254 
100755 --- a/PadmeDB/PadmeDAQ_setup.py +++ b/PadmeDB/PadmeDAQ_setup.py @@ -12,6 +12,8 @@ re_node = re.compile("^\s*node\s+(\d+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s*$") re_link = re.compile("^\s*link\s+(\d+):(\d+):(\d+):(\d+)\s+(-*\d+)\s+(\d+-\d+-\d+)\s+(\d+:\d+:\d+)\s*$") re_run_type = re.compile("^\s*run_type\s+(\d+)\s+(\S+)\s+(\S.*)$") +re_proc_type = re.compile("^\s*proc_type\s+(\d+)\s+(\S+)\s+(\S+)\s+(\S.*)$") +re_file_type = re.compile("^\s*file_type\s+(\d+)\s+(\S+)\s+(\S+)\s+(\S.*)$") max_datetime = "2049-12-31 23:59:59" @@ -217,11 +219,11 @@ def main(): (run_type_id,run_type,run_text) = m.group(1,2,3) run_type_id = int(run_type_id) - c.execute("""SELECT id,description FROM run_type WHERE short_name=%s""",(run_type,)) + c.execute("""SELECT id,description FROM run_type WHERE type=%s""",(run_type,)) res = c.fetchone() if (res == None): print "Adding run type",run_type,"with id",run_type_id,"and description",run_text - c.execute("""INSERT INTO run_type(id,short_name,description) VALUES(%s,%s,%s)""",(run_type_id,run_type,run_text)) + c.execute("""INSERT INTO run_type(id,type,description) VALUES(%s,%s,%s)""",(run_type_id,run_type,run_text)) else: (old_id,old_text) = res old_id = int(old_id) @@ -234,6 +236,58 @@ def main(): print "\tNew:",run_text c.execute("""UPDATE run_type SET description=%s WHERE id=%s""",(run_text,run_type_id)) + # Check process types + m = re_proc_type.search(l) + if (m): + + (proc_type_id,proc_type,proc_exec,proc_text) = m.group(1,2,3,4) + proc_type_id = int(proc_type_id) + c.execute("""SELECT id,executable,description FROM process_type WHERE type=%s""",(proc_type,)) + res = c.fetchone() + if (res == None): + print "Adding process type",proc_type,"with id",proc_type_id,"executable",proc_exec,"and description",proc_text + c.execute("""INSERT INTO process_type(id,type,executable,description) VALUES(%s,%s,%s,%s)""",(proc_type_id,proc_type,proc_exec,proc_text)) + else: + (old_id,old_exec,old_text) = res + 
old_id = int(old_id) + if (proc_type_id != old_id): + print "ERROR - Process type",proc_type,"already exists with id",old_id,"!=",proc_type_id + exit(1) + if (proc_exec != old_exec): + print "ERROR - Process type",proc_type,"already exists with executable",old_exec,"!=",proc_exec + exit(1) + if (proc_text != old_text): + print "WARNING - Updating process type",proc_type,"description" + print "\tOld:",old_text + print "\tNew:",proc_text + c.execute("""UPDATE process_type SET description=%s WHERE id=%s""",(proc_text,proc_type_id)) + + # Check file types + m = re_file_type.search(l) + if (m): + + (file_type_id,file_type,file_prod,file_text) = m.group(1,2,3,4) + file_type_id = int(file_type_id) + c.execute("""SELECT id,producer,description FROM file_type WHERE type=%s""",(file_type,)) + res = c.fetchone() + if (res == None): + print "Adding file type",file_type,"with id",file_type_id,"producer",file_prod,"and description",file_text + c.execute("""INSERT INTO file_type(id,type,producer,description) VALUES(%s,%s,%s,%s)""",(file_type_id,file_type,file_prod,file_text)) + else: + (old_id,old_prod,old_text) = res + old_id = int(old_id) + if (file_type_id != old_id): + print "ERROR - File type",file_type,"already exists with id",old_id,"!=",file_type_id + exit(1) + if (file_prod != old_prod): + print "ERROR - File type",file_type,"already exists with producer",old_prod,"!=",file_prod + exit(1) + if (file_text != old_text): + print "WARNING - Updating file type",file_type,"description" + print "\tOld:",old_text + print "\tNew:",file_text + c.execute("""UPDATE file_type SET description=%s WHERE id=%s""",(file_text,file_type_id)) + # Commit and close connection to DB conn.commit() conn.close() From 36da0c3881d621ab6ebb20d644709af2fd3e7c4c Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Fri, 22 Nov 2019 09:53:42 +0100 Subject: [PATCH 15/64] RunControl: removed obsolete DB setup files --- RunControl/db/tools/CreateDB.py | 176 ----------------- RunControl/db/tools/DumpDB.sh 
| 3 - RunControl/db/tools/MigrateDB.py | 99 ---------- RunControl/db/tools/SetupDB.data | 5 - RunControl/db/tools/SetupDB.py | 107 ---------- RunControl/db/tools/mysql_schema.sql | 257 ------------------------- RunControl/db/tools/mysql_setup.readme | 69 ------- 7 files changed, 716 deletions(-) delete mode 100755 RunControl/db/tools/CreateDB.py delete mode 100755 RunControl/db/tools/DumpDB.sh delete mode 100755 RunControl/db/tools/MigrateDB.py delete mode 100644 RunControl/db/tools/SetupDB.data delete mode 100755 RunControl/db/tools/SetupDB.py delete mode 100644 RunControl/db/tools/mysql_schema.sql delete mode 100644 RunControl/db/tools/mysql_setup.readme diff --git a/RunControl/db/tools/CreateDB.py b/RunControl/db/tools/CreateDB.py deleted file mode 100755 index f5cd5b16..00000000 --- a/RunControl/db/tools/CreateDB.py +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/python - -import sqlite3 - -def main(): - - conn = sqlite3.connect('PadmeDAQ.db'); - c = conn.cursor() - - c.execute('DROP TABLE IF EXISTS replica') - c.execute('DROP TABLE IF EXISTS file') - c.execute('DROP TABLE IF EXISTS proc_config_para') - c.execute('DROP TABLE IF EXISTS process') - c.execute('DROP TABLE IF EXISTS channel') - c.execute('DROP TABLE IF EXISTS board') - c.execute('DROP TABLE IF EXISTS run_config_para') - c.execute('DROP TABLE IF EXISTS board_type') - c.execute('DROP TABLE IF EXISTS run') - c.execute('DROP TABLE IF EXISTS config_para_name') - - c.execute(''' -CREATE TABLE run ( - number INTEGER PRIMARY KEY, - type TEXT, - status INTEGER, - time_start INTEGER, - time_stop INTEGER, - total_events INTEGER, - comment TEXT -) -''') - - c.execute(''' -CREATE TABLE file ( - id INTEGER PRIMARY KEY, - name TEXT UNIQUE, - type TEXT, - n_events INTEGER, - size INTEGER -) -''') - - c.execute(''' -CREATE TABLE config_para_name( - id INTEGER PRIMARY KEY, - name TEXT UNIQUE -) -''') - - c.execute(''' -CREATE TABLE board_type( - id INTEGER PRIMARY KEY, - type TEXT, - manufacturer TEXT, - model TEXT 
UNIQUE, - n_channels INTEGER, - UNIQUE(manufacturer,model) -) -''') - - c.execute(''' -CREATE TABLE run_config_para( - id INTEGER PRIMARY KEY, - run_number INTEGER, - config_para_name_id INTEGER, - value TEXT, - FOREIGN KEY(run_number) REFERENCES run(number), - FOREIGN KEY(config_para_name_id) REFERENCES config_para_name(id) -) -''') - - c.execute(''' -CREATE TABLE replica ( - id INTEGER PRIMARY KEY, - file_id INTEGER, - url TEXT UNIQUE, - status INTEGER, - FOREIGN KEY(file_id) REFERENCES file(id) -) -''') - - c.execute(''' -CREATE TABLE board( - id INTEGER PRIMARY KEY, - board_type_id INTEGER, - serial_number TEXT, - UNIQUE(board_type_id,serial_number), - FOREIGN KEY(board_type_id) REFERENCES board_type(id) -) -''') - - c.execute(''' -CREATE TABLE run_file ( - run_number INTEGER, - file_id INTEGER, - PRIMARY KEY(run_number,file_id), - FOREIGN KEY(run_number) REFERENCES run(number), - FOREIGN KEY(file_id) REFERENCES file(id) -) -''') - - c.execute(''' -CREATE TABLE channel( - id INTEGER PRIMARY KEY, - board_id INTEGER, - FOREIGN KEY(board_id) REFERENCES board(id) -) -''') - - c.execute(''' -CREATE TABLE process ( - id INTEGER PRIMARY KEY, - run_number INTEGER, - board_id INTEGER, - status INTEGER, - time_start INTEGER, - time_stop INTEGER, - n_raw_files INTEGER, - total_events INTEGER, - total_size INTEGER, - UNIQUE(run_number,board_id), - FOREIGN KEY(run_number) REFERENCES run(number), - FOREIGN KEY(board_id) REFERENCES board(id) -) -''') - - c.execute(''' -CREATE TABLE raw_file ( - id INTEGER PRIMARY KEY, - name TEXT UNIQUE, - version INTEGER, - process_id INTEGER, - part INTEGER, - time_open INTEGER, - time_close INTEGER, - n_events INTEGER, - size INTEGER, - FOREIGN KEY(process_id) REFERENCES process(id) -) -''') - - c.execute(''' -CREATE TABLE proc_config_para( - id INTEGER PRIMARY KEY, - process_id INTEGER, - config_para_name_id INTEGER, - value TEXT, - FOREIGN KEY(process_id) REFERENCES process(id), - FOREIGN KEY(config_para_name_id) REFERENCES 
config_para_name(id) -) -''') - -#print -#c.execute('PRAGMA table_info(sqlite_master)') -#for r in c.fetchall(): -# (id,name,type,canbenull,default,isprimarykey) = r -# print id,name,type,canbenull,default,isprimarykey - - # Show all tables - c.execute('SELECT name FROM sqlite_master WHERE type="table"') - res1 = c.fetchall() - for r1 in res1: - (tab,) = r1 - print "\nTable",tab - c.execute('PRAGMA table_info(%s)' % tab) - res2 = c.fetchall() - for r2 in res2: - (id,name,type,canbenull,default,isprimarykey) = r2 - print id,name,type,canbenull,default,isprimarykey - - conn.commit() - conn.close() - -# Execution starts here -if __name__ == "__main__": - main() diff --git a/RunControl/db/tools/DumpDB.sh b/RunControl/db/tools/DumpDB.sh deleted file mode 100755 index 501b23d7..00000000 --- a/RunControl/db/tools/DumpDB.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -sqlite3 PadmeDAQ.db ".dump" diff --git a/RunControl/db/tools/MigrateDB.py b/RunControl/db/tools/MigrateDB.py deleted file mode 100755 index 5f6a5e65..00000000 --- a/RunControl/db/tools/MigrateDB.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/python - -import os -import time -import sqlite3 -import MySQLdb - -def main(): - - # Connect to old version of the DB - connO = sqlite3.connect('/home/daq/DigiDaq/PadmeDAQ/db/PadmeDAQ.db'); - cO = connO.cursor() - - # Get DB connection parameters from environment variables - DB_HOST = os.getenv('PADME_DB_HOST' ,'localhost') - DB_PORT = os.getenv('PADME_DB_PORT' ,'5501') - DB_USER = os.getenv('PADME_DB_USER' ,'padme') - DB_PASSWD = os.getenv('PADME_DB_PASSWD','unknown') - DB_NAME = os.getenv('PADME_DB_NAME' ,'PadmeDB') - - connN = MySQLdb.connect(host=DB_HOST,port=int(DB_PORT),user=DB_USER,passwd=DB_PASSWD,db=DB_NAME) - cN = connN.cursor() - - # Old runs only used board 0 - board_id = 0 - - # The crew was always the same - run_user = "Leonardi, Raggi, Valente" - - # All runs were succesfully closed - run_status = 3 - proc_status = 2 - - # End comment default - comment_end 
= "Normal end of run" - - # Get list of runs from the old DB and populate new DB - cO.execute("SELECT * FROM run"); - resO = cO.fetchall() - for rO in resO: - - # Get all run data - (number,run_type,status,time_start,time_stop,n_daq_files,total_events,total_size,comment_start,configuration) = rO - - time_start = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time_start)) - time_stop = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time_stop)) - - # Some go to the new run table - cN.execute("""INSERT INTO run VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""", - (number,run_type,run_status,time_start,time_start,time_stop,total_events,run_user,comment_start,comment_end)) - - # Some go to the new process table - cN.execute("""INSERT INTO process(run_number,board_id,status,time_start,time_stop,n_daq_files,total_events,total_size) VALUES(%s,%s,%s,%s,%s,%s,%s,%s)""",(number,board_id,proc_status,time_start,time_stop,n_daq_files,total_events,total_size)) - - # Save process id for configuration parameters - cN.execute("""SELECT id FROM process WHERE run_number = %s""",(number,)) - (process_id,) = cN.fetchone() - - # Configuration must be parsed and saved as individual parameters - for cl in configuration.split('\n'): - if (cl!=""): - print "configuration line",cl - (para,val) = cl.split(" ",1) - # See if parameter name exists or create it - cN.execute("""SELECT id FROM config_para_name WHERE name = %s""",(para,)) - res = cN.fetchone() - if (res==None): - cN.execute("""INSERT INTO config_para_name(name) VALUES(%s)""",(para,)) - cN.execute("""SELECT id FROM config_para_name WHERE name = %s""",(para,)) - res = cN.fetchone() - (para_id,) = res - cN.execute("""INSERT INTO proc_config_para(process_id,config_para_name_id,value) VALUES(%s,%s,%s)""",(process_id,para_id,val)) - - # Get list of files from the old DB and populate raw_file in the new DB - old_runnr = -1 - file_part = 0 - cO.execute("SELECT * FROM file ORDER BY time_open"); - resO = cO.fetchall() - for rO in resO: - 
(id,name,type,version,run_number,part,time_open,time_close,size,n_events) = rO - - time_open = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time_open)) - time_close = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time_close)) - - # Get id of process corresponding to this run - cN.execute("""SELECT id FROM process WHERE run_number = %s""",(run_number,)) - (process_id,) = cN.fetchone() - - cN.execute("""INSERT INTO daq_file(name,version,process_id,part,time_open,time_close,n_events,size) VALUES(%s,%s,%s,%s,%s,%s,%s,%s)""",(name,version,process_id,part,time_open,time_close,n_events,size)) - - connN.commit() - connN.close() - - connO.commit() - connO.close() - -# Execution starts here -if __name__ == "__main__": - main() diff --git a/RunControl/db/tools/SetupDB.data b/RunControl/db/tools/SetupDB.data deleted file mode 100644 index fd50afe6..00000000 --- a/RunControl/db/tools/SetupDB.data +++ /dev/null @@ -1,5 +0,0 @@ -# List of boards with temporal evolution -# id type manuf model nchan s/n opt_lnk from_date from_time -board 0 ADC CAEN V1742 32 39 0:0:0:0 2015-11-01 00:00:00 -board 1 ADC CAEN V1742 32 30 0:0:1:0 2015-11-01 00:00:00 -board 2 ADC CAEN V1742 32 135 0:0:1:1 2016-04-04 00:00:00 diff --git a/RunControl/db/tools/SetupDB.py b/RunControl/db/tools/SetupDB.py deleted file mode 100755 index b104cb8e..00000000 --- a/RunControl/db/tools/SetupDB.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/python - -import os -import re -import MySQLdb - -# Define regular expressions used in file parsing -re_empty = re.compile("^\s*$") -re_comment = re.compile("^\s*#") -re_board = re.compile("^\s*board\s+(\d+)\s+(\w+)\s+(\w+)\s+(\w+)\s+(\d+)\s+(\w+)\s+(\d+):(\d+):(\d+):(\d+)\s+(\d+)-(\d+)-(\d+)\s+(\d+):(\d+):(\d+)\s*$") - -def main(): - - # Get DB connection parameters from environment variables - DB_HOST = os.getenv('PADME_DB_HOST' ,'localhost') - DB_PORT = os.getenv('PADME_DB_PORT' ,'5501') - DB_USER = os.getenv('PADME_DB_USER' ,'padme') - DB_PASSWD = 
os.getenv('PADME_DB_PASSWD','unknown') - DB_NAME = os.getenv('PADME_DB_NAME' ,'PadmeDB') - conn = MySQLdb.connect(host=DB_HOST,port=int(DB_PORT),user=DB_USER,passwd=DB_PASSWD,db=DB_NAME) - c = conn.cursor() - - # Read DB setup file - f = open("SetupDB.data","r") - for l in f: - - if (re_empty.search(l) or re_comment.search(l)): continue - - m = re_board.search(l) - if (m): - (board_id,type,manufacturer,model,n_channels,serial_number,node_id,controller_id,channel_id,slot_id,f_year,f_month,f_day,f_hour,f_min,f_sec) = m.group(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16) - board_id = int(board_id) - n_channels = int(n_channels) - node_id = int(node_id) - controller_id = int(controller_id) - channel_id = int(channel_id) - slot_id = int(slot_id) - f_year = int(f_year) - f_month = int(f_month) - f_day = int(f_day) - f_hour = int(f_hour) - f_min = int(f_min) - f_sec = int(f_sec) - - # Process board_type - board_type_id = -1 - c.execute("""SELECT id FROM board_type WHERE manufacturer=%s AND model=%s""",(manufacturer,model)) - res = c.fetchone() - if (res == None): - # Board type does not exist: create it in the DB - print "Creating board_type",type,manufacturer,model,n_channels - c.execute("""INSERT INTO board_type(type,manufacturer,model,n_channels) VALUES(%s,%s,%s,%s)""", - (type,manufacturer,model,n_channels)) - c.execute("""SELECT id FROM board_type WHERE manufacturer=%s AND model=%s""",(manufacturer,model)) - (board_type_id,) = c.fetchone() - else: - (board_type_id,) = res - # Board type exists: check if it is the same - c.execute("""SELECT type,n_channels FROM board_type WHERE id=%s""",(board_type_id,)) - (old_type,old_n_channels) = c.fetchone() - if ( (old_type != type) or (old_n_channels != n_channels) ): - print "ERROR: Found exisiting board_type from",manufacturer,"model",model,"with different characteristics" - print " Old",old_type,old_n_channels - print " New",type,n_channels - return - - # Process board_phys - board_phys_id = -1 - c.execute("""SELECT id FROM 
board_phys WHERE serial_number=%s AND board_type_id=%s""",(serial_number,board_type_id)) - res = c.fetchone() - if (res == None): - # Board_phys does not exist: create it in the DB - print "Creating physical board with board_type_id",board_type_id,"and S/N",serial_number - c.execute("""INSERT INTO board_phys(board_type_id,serial_number) VALUES(%s,%s)""", - (board_type_id,serial_number)) - c.execute("""SELECT id FROM board_phys WHERE serial_number=%s AND board_type_id=%s""",(serial_number,board_type_id)) - (board_phys_id,) = c.fetchone() - else: - (board_phys_id,) = res - - # Process optical_link - optical_link_id = -1 - c.execute("""SELECT id FROM optical_link WHERE node_id=%s AND controller_id=%s AND channel_id=%s AND slot_id=%s""",(node_id,controller_id,channel_id,slot_id)) - res = c.fetchone() - if (res == None): - print "Creating optical link entry for",node_id,controller_id,channel_id,slot_id - c.execute("""INSERT INTO optical_link(node_id,controller_id,channel_id,slot_id) VALUES(%s,%s,%s,%s)""", - (node_id,controller_id,channel_id,slot_id)) - c.execute("""SELECT id FROM optical_link WHERE node_id=%s AND controller_id=%s AND channel_id=%s AND slot_id=%s""",(node_id,controller_id,channel_id,slot_id)) - (optical_link_id,) = c.fetchone() - else: - (optical_link_id,) = res - - # Process board - c.execute("""SELECT id FROM board WHERE id=%s""",(board_id,)) - res = c.fetchone() - if (res == None): - print "Creating board",board_id - c.execute("""INSERT INTO board(id) VALUES(%s)""",(board_id,)) - - # Commit and close connection to DB - conn.commit() - conn.close() - -# Execution starts here -if __name__ == "__main__": - main() diff --git a/RunControl/db/tools/mysql_schema.sql b/RunControl/db/tools/mysql_schema.sql deleted file mode 100644 index 0c6c8d71..00000000 --- a/RunControl/db/tools/mysql_schema.sql +++ /dev/null @@ -1,257 +0,0 @@ --- MySQL Script generated by MySQL Workbench --- 11/12/15 12:56:43 --- Model: New Model Version: 1.0 --- MySQL Workbench Forward 
Engineering - -SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0; -SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0; -SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='TRADITIONAL,ALLOW_INVALID_DATES'; - --- ----------------------------------------------------- --- Schema PadmeDB --- ----------------------------------------------------- -DROP SCHEMA IF EXISTS `PadmeDB` ; - --- ----------------------------------------------------- --- Schema PadmeDB --- ----------------------------------------------------- -CREATE SCHEMA IF NOT EXISTS `PadmeDB` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci ; -USE `PadmeDB` ; - --- ----------------------------------------------------- --- Table `PadmeDB`.`run` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDB`.`run` ( - `number` INT UNSIGNED NOT NULL, - `type` VARCHAR(45) NOT NULL, - `status` INT NOT NULL, - `time_init` DATETIME NULL, - `time_start` DATETIME NULL, - `time_stop` DATETIME NULL, - `total_events` INT UNSIGNED NULL, - `user` VARCHAR(1024) NULL, - `comment_start` VARCHAR(10240) NULL, - `comment_end` VARCHAR(10240) NULL, - PRIMARY KEY (`number`) , - UNIQUE INDEX `number_UNIQUE` (`number` ASC) ) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDB`.`board` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDB`.`board` ( - `id` INT NOT NULL, - PRIMARY KEY (`id`) ) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDB`.`process` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDB`.`process` ( - `id` INT NOT NULL AUTO_INCREMENT, - `board_id` INT NOT NULL, - `run_number` INT UNSIGNED NOT NULL, - `status` INT NOT NULL, - `time_start` DATETIME NULL, - `time_stop` DATETIME NULL, - `n_daq_files` INT UNSIGNED NULL, - `total_events` INT UNSIGNED NULL, - `total_size` BIGINT UNSIGNED NULL, - 
PRIMARY KEY (`id`) , - INDEX `fk_process_run1_idx` (`run_number` ASC) , - INDEX `fk_process_board1_idx` (`board_id` ASC) , - CONSTRAINT `fk_process_run1` - FOREIGN KEY (`run_number`) - REFERENCES `PadmeDB`.`run` (`number`) - ON DELETE NO ACTION - ON UPDATE NO ACTION, - CONSTRAINT `fk_process_board1` - FOREIGN KEY (`board_id`) - REFERENCES `PadmeDB`.`board` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDB`.`daq_file` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDB`.`daq_file` ( - `id` INT UNSIGNED NOT NULL AUTO_INCREMENT, - `name` VARCHAR(255) NOT NULL, - `version` INT NULL, - `process_id` INT NOT NULL, - `part` INT UNSIGNED NULL, - `time_open` DATETIME NULL, - `time_close` DATETIME NULL, - `time_delete` DATETIME NULL, - `n_events` INT UNSIGNED NULL, - `size` BIGINT UNSIGNED NULL, - PRIMARY KEY (`id`) , - UNIQUE INDEX `name_UNIQUE` (`name` ASC) , - INDEX `fk_daq_file_process1_idx` (`process_id` ASC) , - CONSTRAINT `fk_daq_file_process1` - FOREIGN KEY (`process_id`) - REFERENCES `PadmeDB`.`process` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDB`.`board_type` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDB`.`board_type` ( - `id` INT NOT NULL AUTO_INCREMENT, - `type` VARCHAR(45) NOT NULL, - `manufacturer` VARCHAR(45) NOT NULL, - `model` VARCHAR(45) NOT NULL, - `n_channels` INT UNSIGNED NULL, - PRIMARY KEY (`id`) , - UNIQUE INDEX `manu_model` (`manufacturer` ASC, `model` ASC) ) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDB`.`board_phys` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDB`.`board_phys` ( - `id` INT NOT NULL AUTO_INCREMENT, - `serial_number` VARCHAR(45) NOT NULL, - 
`board_type_id` INT NOT NULL, - PRIMARY KEY (`id`) , - INDEX `fk_board_phys_board_type1_idx` (`board_type_id` ASC) , - UNIQUE INDEX `u_btsn` (`serial_number` ASC, `board_type_id` ASC), - CONSTRAINT `fk_board_phys_board_type1` - FOREIGN KEY (`board_type_id`) - REFERENCES `PadmeDB`.`board_type` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDB`.`config_para_name` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDB`.`config_para_name` ( - `id` INT NOT NULL AUTO_INCREMENT, - `name` VARCHAR(128) NULL, - UNIQUE INDEX `name_UNIQUE` (`name` ASC) , - PRIMARY KEY (`id`) ) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDB`.`proc_config_para` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDB`.`proc_config_para` ( - `id` INT NOT NULL AUTO_INCREMENT, - `process_id` INT NOT NULL, - `config_para_name_id` INT NOT NULL, - `value` VARCHAR(1024) NOT NULL, - PRIMARY KEY (`id`) , - INDEX `fk_proc_config_para_config_para_name1_idx` (`config_para_name_id` ASC) , - INDEX `fk_proc_config_para_process1_idx` (`process_id` ASC) , - CONSTRAINT `fk_proc_config_para_config_para_name1` - FOREIGN KEY (`config_para_name_id`) - REFERENCES `PadmeDB`.`config_para_name` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION, - CONSTRAINT `fk_proc_config_para_process1` - FOREIGN KEY (`process_id`) - REFERENCES `PadmeDB`.`process` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDB`.`run_config_para` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDB`.`run_config_para` ( - `id` INT NOT NULL AUTO_INCREMENT, - `run_number` INT UNSIGNED NOT NULL, - `config_para_name_id` INT NOT NULL, - `value` VARCHAR(1024) NOT NULL, - PRIMARY KEY (`id`), 
- INDEX `fk_run_config_para_config_para_name1_idx` (`config_para_name_id` ASC) , - INDEX `fk_run_config_para_run1_idx` (`run_number` ASC) , - CONSTRAINT `fk_run_config_para_config_para_name1` - FOREIGN KEY (`config_para_name_id`) - REFERENCES `PadmeDB`.`config_para_name` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION, - CONSTRAINT `fk_run_config_para_run1` - FOREIGN KEY (`run_number`) - REFERENCES `PadmeDB`.`run` (`number`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDB`.`optical_link` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDB`.`optical_link` ( - `id` INT NOT NULL AUTO_INCREMENT, - `node_id` INT UNSIGNED NOT NULL, - `controller_id` INT UNSIGNED NOT NULL, - `channel_id` INT UNSIGNED NOT NULL, - `slot_id` INT UNSIGNED NOT NULL, - PRIMARY KEY (`id`) ) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDB`.`l_board_board_phys` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS `PadmeDB`.`l_board_board_phys` ( - `board_id` INT NOT NULL , - `board_phys_id` INT NOT NULL , - `time_start` DATETIME NULL , - `time_stop` DATETIME NULL , - PRIMARY KEY (`board_id`, `board_phys_id`) , - INDEX `fk_l_board_board_phys_board1_idx` (`board_id` ASC) , - INDEX `fk_l_board_board_phys_board_phys1_idx` (`board_phys_id` ASC) , - CONSTRAINT `fk_l_board_board_phys_board_phys1` - FOREIGN KEY (`board_phys_id`) - REFERENCES `PadmeDB`.`board_phys` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION, - CONSTRAINT `fk_l_board_board_phys_board1` - FOREIGN KEY (`board_id`) - REFERENCES `PadmeDB`.`board` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - --- ----------------------------------------------------- --- Table `PadmeDB`.`l_board_optical_link` --- ----------------------------------------------------- -CREATE TABLE IF NOT EXISTS 
`PadmeDB`.`l_board_optical_link` ( - `board_id` INT NOT NULL , - `optical_link_id` INT NOT NULL , - `time_start` DATETIME NULL , - `time_stop` DATETIME NULL , - PRIMARY KEY (`board_id`, `optical_link_id`) , - INDEX `fk_l_board_optical_link_board1_idx` (`board_id` ASC) , - INDEX `fk_l_board_optical_link_optical_link1_idx` (`optical_link_id` ASC) , - CONSTRAINT `fk_l_board_optical_link_board1` - FOREIGN KEY (`board_id`) - REFERENCES `PadmeDB`.`board` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION, - CONSTRAINT `fk_l_board_optical_link_optical_link1` - FOREIGN KEY (`optical_link_id`) - REFERENCES `PadmeDB`.`optical_link` (`id`) - ON DELETE NO ACTION - ON UPDATE NO ACTION) -ENGINE = InnoDB; - - -SET SQL_MODE=@OLD_SQL_MODE; -SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS; -SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS; diff --git a/RunControl/db/tools/mysql_setup.readme b/RunControl/db/tools/mysql_setup.readme deleted file mode 100644 index e01625a0..00000000 --- a/RunControl/db/tools/mysql_setup.readme +++ /dev/null @@ -1,69 +0,0 @@ -1) If you do not need external connectivity (e.g. from a web server) to this server, -or it was already configured, you can skip to point 6), otherwise log on as root -to the node running mysqld. - -2) Edit /etc/my.cnf, find the [mysqld] section, and add line - -bind-address= - -e.g. - -bin-address=193.206.86.233 - -3) Open port 3306 on the firewall with command - -$ /sbin/iptables -I INPUT -i -p tcp --destination-port 3306 -j ACCEPT - -e.g. - -$ /sbin/iptables -I INPUT 10 -i eth3 -p tcp --destination-port 3306 -j ACCEPT - -Note: -- is the port used by your node to connect to the external network -- is the position in the INPUT iptable where you want to insert - the new rule. In general it should go just before the "REJECT ALL" line. - You can check the current list of rules with "iptables -L --line-numbers". - -4) Restart mysqld with command - -$ /etc/init.d/mysqld restart - -5) Logout from root and reconnect as a standard user. 
- -6) If mysqld was already configured with a root password, skip to point 7). - -If mysqld was just installed and this is the first connection to the server -you must connect to mysqld as root with no password, set the root password -and delete any anonymous account that might exist: - -$ mysql -u root mysql -mysql> set password for root@localhost=password(''); -mysql> set password for root@'127.0.0.1'=password(''); -mysql> set password for root@'padmeserv1'=password(''); -mysql> delete from user where user=''; -mysql> exit - -7) Reconnect to MySQL as root (now you must use ) to create the -"PademDB" database and the "padme" user to manage it: - -$ mysql -u root -p mysql -mysql> CREATE DATABASE PadmeDB; -mysql> CREATE USER 'padme'@'localhost' IDENTIFIED BY ''; -mysql> CREATE USER 'padme'@'127.0.0.1' IDENTIFIED BY ''; -mysql> CREATE USER 'padme'@'%.lnf.infn.it' IDENTIFIED BY ''; -mysql> GRANT ALL ON PadmeDB.* TO 'padme'@'localhost'; -mysql> GRANT ALL ON PadmeDB.* TO 'padme'@'%.lnf.infn.it'; -mysql> exit - -8) Reconnect to MySQL as "padme" user (using ) and create the -schema for the PadmeDB database: - -$ mysql -u padme -p PadmeDB -mysql> source mysql_schema.sql -mysql> exit - -9) Create all board-related information in the database: - -$ ./SetupDB.py - -10) The PadmeDB database is now ready to accept data! 
From 8779384423833e9d863d88fa158339d5bdd7d93c Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Fri, 22 Nov 2019 12:08:54 +0100 Subject: [PATCH 16/64] RunControl: changed default to not use GUI --- RunControl/code/RunControl.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/RunControl/code/RunControl.py b/RunControl/code/RunControl.py index 3605c104..23ddce68 100755 --- a/RunControl/code/RunControl.py +++ b/RunControl/code/RunControl.py @@ -14,16 +14,18 @@ def main(argv): try: opts,args = getopt.getopt(argv,"h",["no-gui","server","interactive"]) except getopt.GetoptError: - print 'RunControl [--no-gui] [--server] [--interactive] [-h|--help]' + print 'RunControl [--gui|--no-gui] [--server] [--interactive] [-h|--help]' sys.exit(2) - useGUI = True + useGUI = False startServer = False serverInteractive = False for opt,arg in opts: if opt == '-h' or opt == '--help': - print 'RunControl [--no-gui] [--server] [--interactive] [-h]' + print 'RunControl [--gui|--no-gui] [--server] [--interactive] [-h]' sys.exit() + elif opt == '--gui': + useGUI = True elif opt == '--no-gui': useGUI = False elif opt == '--server': @@ -41,7 +43,8 @@ def main(argv): elif useGUI: - app = RunControlGUI() + print "Sorry: RunControl GUI is currently not available. Please use --no-gui option to start the text version." 
+ //app = RunControlGUI() else: From dd4134ebfe33317ba9da8278a015b5f64fe3c667 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Fri, 22 Nov 2019 12:09:29 +0100 Subject: [PATCH 17/64] RunControl: changed PadmeDB class to use new schema --- RunControl/code/PadmeDB.py | 346 ++++++++++++++++++++----------------- 1 file changed, 188 insertions(+), 158 deletions(-) diff --git a/RunControl/code/PadmeDB.py b/RunControl/code/PadmeDB.py index ae2c1364..94bf1dd3 100644 --- a/RunControl/code/PadmeDB.py +++ b/RunControl/code/PadmeDB.py @@ -7,6 +7,13 @@ class PadmeDB: def __init__(self): + # Get DB connection parameters from environment variables + self.DB_HOST = os.getenv('PADME_DB_HOST' ,'localhost') + self.DB_PORT = int(os.getenv('PADME_DB_PORT' ,'5501')) + self.DB_USER = os.getenv('PADME_DB_USER' ,'padme') + self.DB_PASSWD = os.getenv('PADME_DB_PASSWD','unknown') + self.DB_NAME = os.getenv('PADME_DB_NAME' ,'PadmeDAQ') + self.conn = None def __del__(self): @@ -17,14 +24,15 @@ def connect_db(self): self.close_db() - # Get DB connection parameters from environment variables - DB_HOST = os.getenv('PADME_DB_HOST' ,'localhost') - DB_PORT = os.getenv('PADME_DB_PORT' ,'5501') - DB_USER = os.getenv('PADME_DB_USER' ,'padme') - DB_PASSWD = os.getenv('PADME_DB_PASSWD','unknown') - DB_NAME = os.getenv('PADME_DB_NAME' ,'PadmeDAQ') - - self.conn = MySQLdb.connect(host=DB_HOST,port=int(DB_PORT),user=DB_USER,passwd=DB_PASSWD,db=DB_NAME) + try: + self.conn = MySQLdb.connect(host = self.DB_HOST, + port = self.DB_PORT, + user = self.DB_USER, + passwd = self.DB_PASSWD, + db = self.DB_NAME) + except: + print "*** PadmeDB ERROR *** Unable to connect to DB. 
Exception: %s"%sys.exc_info()[0] + sys.exit(2) def close_db(self): @@ -34,8 +42,6 @@ def close_db(self): def check_db(self): - # This does not work - #if (self.conn and self.conn.is_connected()): return if self.conn: try: self.conn.ping() @@ -51,9 +57,9 @@ def is_run_in_db(self,run_nr): c.execute("""SELECT COUNT(number) FROM run WHERE number=%s""",(run_nr,)) (n,) = c.fetchone() self.conn.commit() - if (n): return 1 + if (n): return True - return 0 + return False def get_last_run_in_db(self): @@ -73,23 +79,29 @@ def create_run(self,run_nr,run_type,run_user,run_comment): c = self.conn.cursor() # Get run_type id - c.execute("""SELECT id FROM run_type WHERE short_name = %s""",(run_type,)) + c.execute("""SELECT id FROM run_type WHERE type = %s""",(run_type,)) if (c.rowcount == 0): print "PadmeDB::create_run - WARNING - Unknown run type selected: %s - Defaulting to OTHER"%run_type - c.execute("""SELECT id FROM run_type WHERE short_name = OTHER""") + c.execute("""SELECT id FROM run_type WHERE type = %s""",("OTHER",)) res = c.fetchone() (run_type_id,) = res self.conn.commit() # Create run - c.execute("""INSERT INTO run (number,run_type_id,status,total_events,user) VALUES (%s,%s,%s,%s,%s)""",(run_nr,run_type_id,0,0,run_user)) + try: + c.execute("""INSERT INTO run (number,run_type_id,status,total_events,user) VALUES (%s,%s,%s,%s,%s)""",(run_nr,run_type_id,0,0,run_user)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() # Create start of run comment - c.execute("""INSERT INTO log_entry (run_number,type,level,time,text) VALUES (%s,%s,%s,%s,%s)""",(run_nr,"SOR",0,self.now_str(),run_comment)) + try: + c.execute("""INSERT INTO log_entry (run_number,type,level,time,text) VALUES (%s,%s,%s,%s,%s)""",(run_nr,"SOR",0,self.now_str(),run_comment)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() - def create_daq_process(self,mode,run_number,link_id): + def 
create_process(self,run_number,proc_type,node_id): self.check_db() c = self.conn.cursor() @@ -97,115 +109,91 @@ def create_daq_process(self,mode,run_number,link_id): # Check if run number exists c.execute("""SELECT number FROM run WHERE number = %s""",(run_number,)) if c.rowcount == 0: - print "PadmeDB::create_daq_process - ERROR - Unknown run number: %d\n"%run_number + print "PadmeDB::create_process - ERROR - Unknown run number: %d\n"%run_number return -1 - # Check if link id exists - op_link_exists = True - c.execute("""SELECT id FROM optical_link WHERE id = %s""",(link_id,)) + # Get process type id + c.execute("""SELECT id FROM process_type WHERE type = %s""",(proc_type,)) if c.rowcount == 0: - print "PadmeDB::create_daq_process - WARNING - Unknown optical_link id: %d\n"%link_id - op_link_exists = False - - if op_link_exists: - c.execute("""INSERT INTO daq_process (mode,run_number,optical_link_id,status) VALUES (%s,%s,%s,%s)""",(mode,run_number,link_id,0)) - else: - # Accept processes with no associated optical link (optical_link_id is NULL) - c.execute("""INSERT INTO daq_process (mode,run_number,status) VALUES (%s,%s,%s)""",(mode,run_number,0)) + print "PadmeDB::create_process - ERROR - Unknown process type: %s\n"%proc_type + return -1 + (proc_type_id,) = c.fetchone() + # Create process and get its id + try: + c.execute("""INSERT INTO process (run_number,proc_type_id,status) VALUES (%s,%s,%s)""",(run_number,proc_type_id,0)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) + return -1 process_id = c.lastrowid + # Check if node exists and update process info + if node_id: + c.execute("""SELECT id FROM node WHERE id = %s""",(node_id,)) + if (c.rowcount == 0): + print "PadmeDB::create_process - WARNING - Unknown node id: %d\n"%node_id + else: + try: + c.execute("""UPDATE process SET node_id = %s WHERE id = %s""",(node_id,process_id)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) + self.conn.commit() 
return process_id - - def create_trigger_process(self,run_number,node_id): + + def create_daq_process(self,mode,run_number,link_id): self.check_db() c = self.conn.cursor() - # Check if run exists - c.execute("""SELECT number FROM run WHERE number = %s""",(run_number,)) - if (c.rowcount == 0): - print "PadmeDB::create_trigger_process - ERROR - Unknown run number: %d\n"%run_number + # Check if link id exists and get node + node_id = None + c.execute("""SELECT node_id FROM optical_link WHERE id = %s""",(link_id,)) + if c.rowcount == 0: + print "PadmeDB::create_daq_process - WARNING - Unknown optical_link id: %d\n"%link_id + else: + (node_id,) = c.fetchone() + + # Create process in database + process_id = -1 + if mode == "DAQ": + process_id = self.create_process(run_number,"ADCDAQ",node_id) + elif mode == "ZSUP": + process_id = self.create_process(run_number,"ZEROSUP",node_id) + else: + print "PADMEDB::create_daq_process - ERROR - Unknown DAQ process mode %s"%mode return -1 - # Check if node exists - node_exists = True - c.execute("""SELECT id FROM node WHERE id = %s""",(node_id,)) - if (c.rowcount == 0): - print "PadmeDB::create_trigger_process - WARNING - Unknown node id: %d\n"%node_id - node_exists = False - - # Create trigger process and get its id - if node_exists: - c.execute("""INSERT INTO trigger_process (run_number,node_id,status) VALUES (%s,%s,%s)""",(run_number,node_id,0)) - else: - # Accept mergers with no associated node id (node_id is NULL) - c.execute("""INSERT INTO trigger_process (run_number,status) VALUES (%s,%s)""",(run_number,0)) - process_id = c.lastrowid + if (mode == "DAQ") and (process_id != -1): + # Create association between process and optical link for ADCDAQ processes + try: + c.execute("""INSERT INTO daq_link (process_id,optical_link_id) VALUES (%s,%s)""",(process_id,link_id)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() return process_id - def 
create_merger_process(self,run_number,node_id): - - self.check_db() - c = self.conn.cursor() - - # Check if run exists - c.execute("""SELECT number FROM run WHERE number = %s""",(run_number,)) - if (c.rowcount == 0): - print "PadmeDB::create_merger_process - ERROR - Unknown run number: %d\n"%run_number - return -1 + def create_trigger_process(self,run_number,node_id): - # Check if node exists - node_exists = True - c.execute("""SELECT id FROM node WHERE id = %s""",(node_id,)) - if (c.rowcount == 0): - print "PadmeDB::create_merger_process - WARNING - Unknown node id: %d\n"%node_id - node_exists = False + # Create process in database + process_id = self.create_process(run_number,"TRIGGER",node_id) - # Create merger and get its id - if node_exists: - c.execute("""INSERT INTO merger_process (run_number,node_id,status) VALUES (%s,%s,%s)""",(run_number,node_id,0)) - else: - # Accept mergers with no associated node id (node_id is NULL) - c.execute("""INSERT INTO merger_process (run_number,status) VALUES (%s,%s)""",(run_number,0)) - process_id = c.lastrowid + return process_id + + def create_merger_process(self,run_number,node_id): - self.conn.commit() + # Create process in database + process_id = self.create_process(run_number,"MERGER",node_id) return process_id def create_level1_process(self,run_number,node_id,number): - self.check_db() - c = self.conn.cursor() - - # Check if run exists - c.execute("""SELECT number FROM run WHERE number = %s""",(run_number,)) - if (c.rowcount == 0): - print "PadmeDB::create_level1_process - ERROR - Unknown run number: %d\n"%run_number - return -1 - - # Check if node exists - node_exists = True - c.execute("""SELECT id FROM node WHERE id = %s""",(node_id,)) - if (c.rowcount == 0): - print "PadmeDB::create_level1_process - WARNING - Unknown node id: %d\n"%node_id - node_exists = False - - # Create merger and get its id - if node_exists: - c.execute("""INSERT INTO level1_process (run_number,node_id,number,status) VALUES 
(%s,%s,%s,%s)""",(run_number,node_id,number,0)) - else: - # Accept mergers with no associated node id (node_id is NULL) - c.execute("""INSERT INTO level1_process (run_number,number,status) VALUES (%s,%s,%s)""",(run_number,number,0)) - process_id = c.lastrowid - - self.conn.commit() + # Create process in database + process_id = self.create_process(run_number,"LEVEL1",node_id) return process_id @@ -213,28 +201,40 @@ def set_run_status(self,run_nr,status): self.check_db() c = self.conn.cursor() - c.execute("""UPDATE run SET status = %s WHERE number = %s""",(status,run_nr)) + try: + c.execute("""UPDATE run SET status = %s WHERE number = %s""",(status,run_nr)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() def set_run_time_init(self,run_nr,time_init): self.check_db() c = self.conn.cursor() - c.execute("""UPDATE run SET time_init = %s WHERE number = %s""",(time_init,run_nr)) + try: + c.execute("""UPDATE run SET time_init = %s WHERE number = %s""",(time_init,run_nr)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() def set_run_time_start(self,run_nr,time_start): self.check_db() c = self.conn.cursor() - c.execute("""UPDATE run SET time_start = %s WHERE number = %s""",(time_start,run_nr)) + try: + c.execute("""UPDATE run SET time_start = %s WHERE number = %s""",(time_start,run_nr)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() def set_run_time_stop(self,run_nr,time_stop): self.check_db() c = self.conn.cursor() - c.execute("""UPDATE run SET time_stop = %s WHERE number = %s""",(time_stop,run_nr)) + try: + c.execute("""UPDATE run SET time_stop = %s WHERE number = %s""",(time_stop,run_nr)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() def set_run_comment_end(self,run_nr,comment_end): @@ -242,7 +242,11 @@ def set_run_comment_end(self,run_nr,comment_end): # Create end of run 
comment self.check_db() c = self.conn.cursor() - c.execute("""INSERT INTO log_entry (run_number,type,level,time,text) VALUES (%s,%s,%s,%s,%s)""",(run_nr,"EOR",0,self.now_str(),comment_end)) + try: + c.execute("""INSERT INTO log_entry (run_number,type,level,time,text) VALUES (%s,%s,%s,%s,%s)""", + (run_nr,"EOR",0,self.now_str(),comment_end)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() def set_run_total_events(self,run_nr,total_events): @@ -250,7 +254,10 @@ def set_run_total_events(self,run_nr,total_events): # Add total number of events info to run self.check_db() c = self.conn.cursor() - c.execute("""UPDATE run SET total_events = %s WHERE number = %s""",(total_events,run_nr)) + try: + c.execute("""UPDATE run SET total_events = %s WHERE number = %s""",(total_events,run_nr)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() def add_cfg_para_run(self,run_nr,para_name,para_val): @@ -258,63 +265,43 @@ def add_cfg_para_run(self,run_nr,para_name,para_val): self.check_db() para_id = self.get_para_id(para_name) c = self.conn.cursor() - c.execute("""INSERT INTO run_config_para (run_number,config_para_name_id,value) VALUES (%s,%s,%s)""", - (run_nr,para_id,para_val)) + try: + c.execute("""INSERT INTO run_config_para (run_number,config_para_name_id,value) VALUES (%s,%s,%s)""", + (run_nr,para_id,para_val)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() - def add_cfg_para_daq(self,daq_id,para_name,para_val): + def add_cfg_para_proc(self,proc_id,para_name,para_val): self.check_db() para_id = self.get_para_id(para_name) c = self.conn.cursor() - c.execute("""INSERT INTO daq_proc_config_para (daq_process_id,config_para_name_id,value) VALUES (%s,%s,%s)""", - (daq_id,para_id,para_val)) - self.conn.commit() - - def add_cfg_para_trigger(self,trigger_id,para_name,para_val): - - self.check_db() - para_id = self.get_para_id(para_name) - c = 
self.conn.cursor() - c.execute("""INSERT INTO trigger_proc_config_para (trigger_process_id,config_para_name_id,value) VALUES (%s,%s,%s)""", - (trigger_id,para_id,para_val)) - self.conn.commit() - - def add_cfg_para_merger(self,merger_id,para_name,para_val): - - self.check_db() - para_id = self.get_para_id(para_name) - c = self.conn.cursor() - c.execute("""INSERT INTO merger_proc_config_para (merger_process_id,config_para_name_id,value) VALUES (%s,%s,%s)""", - (merger_id,para_id,para_val)) - self.conn.commit() - - def add_cfg_para_level1(self,level1_id,para_name,para_val): - - self.check_db() - para_id = self.get_para_id(para_name) - c = self.conn.cursor() - c.execute("""INSERT INTO level1_proc_config_para (level1_process_id,config_para_name_id,value) VALUES (%s,%s,%s)""", - (level1_id,para_id,para_val)) + try: + c.execute("""INSERT INTO proc_config_para (process_id,config_para_name_id,value) VALUES (%s,%s,%s)""", + (proc_id,para_id,para_val)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() def get_para_id(self,para_name): + para_id = None self.check_db() c = self.conn.cursor() c.execute("""SELECT id FROM config_para_name WHERE name=%s""",(para_name,)) res = c.fetchone() - self.conn.commit() - if (res == None): - c.execute("""INSERT INTO config_para_name (name) VALUES (%s)""",(para_name,)) - self.conn.commit() - c.execute("""SELECT id FROM config_para_name WHERE name=%s""",(para_name,)) - res = c.fetchone() - self.conn.commit() + try: + c.execute("""INSERT INTO config_para_name (name) VALUES (%s)""",(para_name,)) + para_id = c.lastrowid + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) + else: + (para_id,) = res + self.conn.commit() - (id,) = res - return id + return para_id def now_str(self): return time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime()) @@ -335,8 +322,8 @@ def get_node_id(self,node): res = c.fetchone() self.conn.commit() if (res != None): - (id,) = res - return id + (node_id,) 
= res + return node_id else: @@ -349,20 +336,20 @@ def get_node_id(self,node): res = c.fetchone() self.conn.commit() if (res != None): - (id,) = res - return id + (node_id,) = res + return node_id # If not found, return -1 return -1 - def get_node_daq_ip(self,id): + def get_node_daq_ip(self,node_id): # Return IP address on DAQ VLAN (192.168.60.X) of node with given ID self.check_db() c = self.conn.cursor() - c.execute("""SELECT ip_addr_daq FROM node WHERE id=%s""",(id,)) + c.execute("""SELECT ip_addr_daq FROM node WHERE id=%s""",(node_id,)) res = c.fetchone() self.conn.commit() if (res != None): @@ -380,36 +367,79 @@ def get_run_types(self): self.check_db() c = self.conn.cursor() - c.execute("""SELECT short_name FROM run_type""") + c.execute("""SELECT type FROM run_type""") data = c.fetchall() + self.conn.commit() + type_list = [] for row in data: type_list.append(row[0]) + return type_list + + def get_process_types(self): + + # Return list of process types known to DB + + self.check_db() + c = self.conn.cursor() + + c.execute("""SELECT type FROM process_type""") + data = c.fetchall() + self.conn.commit() + + type_list = [] + for row in data: type_list.append(row[0]) + + return type_list + + def get_file_types(self): + + # Return list of file types known to DB + + self.check_db() + c = self.conn.cursor() + + c.execute("""SELECT type FROM file_type""") + data = c.fetchall() self.conn.commit() + type_list = [] + for row in data: type_list.append(row[0]) + return type_list def get_link_id(self,node_id,controller_id,channel_id,slot_id): # Return id of optical link given its description + link_id = -1 + self.check_db() c = self.conn.cursor() c.execute("""SELECT id FROM optical_link WHERE node_id=%s AND controller_id=%s AND channel_id=%s AND slot_id=%s""", (node_id,controller_id,channel_id,slot_id)) - if c.rowcount == 0: return -1 ret = c.fetchone() - return ret[0] + if ret != None: (link_id,) = ret - def get_merger_final_info(self,merger_id): + self.conn.commit() + + 
return link_id + + def get_merger_final_info(self,merger_proc_id): # Return final events and size info from merger + + tot_evts = -1 + tot_size = -1 self.check_db() c = self.conn.cursor() - c.execute("""SELECT total_events,total_size FROM merger_process WHERE id=%s""",(merger_id,)) - if c.rowcount == 0: return (-1,-1) + c.execute("""SELECT total_events,total_size FROM process WHERE id=%s""",(merger_proc_id,)) ret = c.fetchone() - return ret + if ret != None: (tot_evts,tot_size) = ret + + self.conn.commit() + + return (tot_evts,tot_size) From ee4acef1fa4b0643abd06d1a79c5c90020d968b4 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Fri, 22 Nov 2019 12:14:05 +0100 Subject: [PATCH 18/64] RunControl: fixed typo in RunControl.py --- RunControl/code/RunControl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RunControl/code/RunControl.py b/RunControl/code/RunControl.py index 23ddce68..dc7f2c41 100755 --- a/RunControl/code/RunControl.py +++ b/RunControl/code/RunControl.py @@ -44,7 +44,7 @@ def main(argv): elif useGUI: print "Sorry: RunControl GUI is currently not available. Please use --no-gui option to start the text version." 
- //app = RunControlGUI() + #app = RunControlGUI() else: From feb8a3ae3e2f7c306bfad9c5d5007a162fd3853c Mon Sep 17 00:00:00 2001 From: Stefania Spagnolo Date: Sat, 23 Nov 2019 09:19:42 +0100 Subject: [PATCH 19/64] Analysis update --- .../AnalysisBase/src/EventSelection.cc | 164 +++++++++++++++++- .../AnalysisTools/produceSelectionPlots.C | 32 +++- 2 files changed, 191 insertions(+), 5 deletions(-) diff --git a/PadmeAnalysis/AnalysisBase/src/EventSelection.cc b/PadmeAnalysis/AnalysisBase/src/EventSelection.cc index 501670ab..29d88a64 100644 --- a/PadmeAnalysis/AnalysisBase/src/EventSelection.cc +++ b/PadmeAnalysis/AnalysisBase/src/EventSelection.cc @@ -356,6 +356,9 @@ Bool_t EventSelection::ProcessAnalysisSS() int n2g=0; int n2gDsume=0; int n2gFR = 0; + + int ichx=0; + int ichy=0; //std::cout<<" pointer to collection = "<<(long)fECal_ClColl<GetNElements()<FillHisto2(hname, xEne, float(cls)); pos1 = xClu->GetPosition(); - if (isMC) + + /*if (isMC) { double tno = pos1.y(); pos1.SetY(pos1.x()); pos1.SetX(tno); } + */ + // Sum of All Cluster Energy eSumCl = eSumCl+xEne; @@ -414,6 +420,12 @@ Bool_t EventSelection::ProcessAnalysisSS() hit = fECal_hitEvent->Hit(hitVInCl[ih]); hSvc->FillHisto(hname, hit->GetTime()-hitSeed->GetTime()); } + for (int ih=0; ihGetNHitsInClus(); ++ih) + { + ichx = xChId/100; + ichy = xChId%100; + } + // loop for basic 2gamma search for (int clCal=hECal+1; clCalGetNElements(); ++clCal) { @@ -427,12 +439,15 @@ Bool_t EventSelection::ProcessAnalysisSS() aChId = aClu->GetChannelId(); aTime = aClu->GetTime(); pos2 = aClu->GetPosition(); - if (isMC) + /*if (isMC) { double tno = pos2.y(); pos2.SetY(pos2.x()); pos2.SetX(tno); - } + + } + */ + aSumCl = xEne+aEne; @@ -480,6 +495,14 @@ Bool_t EventSelection::ProcessAnalysisSS() if (fabs(dt)<10.) 
{ + hname = "SS2gR_passDt"; + hSvc->FillHisto(hname, pos1.Perp(), 0.5); + hSvc->FillHisto(hname, pos2.Perp(), 0.5); + + hname = "SS2gPhi_passDt"; + hSvc->FillHisto(hname, pos1.Phi(), 0.5); + hSvc->FillHisto(hname, pos2.Phi(), 0.5); + hname = "SS2gSumE_passDt"; hSvc->FillHisto(hname, aSumCl); @@ -511,6 +534,14 @@ Bool_t EventSelection::ProcessAnalysisSS() hSvc->FillHisto2(hname, pos2.x(), pos2.y(), aEne); + hname = "SS2gEvsR_passDt"; + hSvc->FillHisto2(hname, pos1.Perp(), xEne,0.5); + hSvc->FillHisto2(hname, pos2.Perp(), aEne,0.5); + + hname = "SS2gEvsPhi_passDt"; + hSvc->FillHisto2(hname, pos1.Phi(), xEne,0.5); + hSvc->FillHisto2(hname, pos2.Phi(), aEne,0.5); + if (cos(dPhi)FillHisto(hname, aSumCl); + hname = "SS2gPhi_passDtDphi"; + hSvc->FillHisto(hname, pos1.Phi(), 0.5); + hSvc->FillHisto(hname, pos2.Phi(), 0.5); + hname = "SS2gR_passDtDphi"; + hSvc->FillHisto(hname, pos1.Perp(), 0.5); + hSvc->FillHisto(hname, pos2.Perp(), 0.5); hname = "SS2gDt_passDtDphi"; hSvc->FillHisto(hname, dt); hname = "SS2gDphi_passDtDphi"; @@ -546,6 +583,14 @@ Bool_t EventSelection::ProcessAnalysisSS() hname = "SS2gXYEw_passDtDphi"; hSvc->FillHisto2(hname, pos1.x(), pos1.y(), xEne); hSvc->FillHisto2(hname, pos2.x(), pos2.y(), aEne); + hname = "SS2gEvsR_passDtDphi"; + hSvc->FillHisto2(hname, pos1.Perp(), xEne,0.5); + hSvc->FillHisto2(hname, pos2.Perp(), aEne,0.5); + + hname = "SS2gEvsPhi_passDtDphi"; + hSvc->FillHisto2(hname, pos1.Phi(), xEne,0.5); + hSvc->FillHisto2(hname, pos2.Phi(), aEne,0.5); + if (fabs(xcog)<20. && fabs(ycog)<20.) 
{ @@ -553,6 +598,12 @@ Bool_t EventSelection::ProcessAnalysisSS() hname = "SS2gSumE_passDtDphiCog"; hSvc->FillHisto(hname, aSumCl); + hname = "SS2gPhi_passDtDphiCog"; + hSvc->FillHisto(hname, pos1.Phi(), 0.5); + hSvc->FillHisto(hname, pos2.Phi(), 0.5); + hname = "SS2gR_passDtDphiCog"; + hSvc->FillHisto(hname, pos1.Perp(), 0.5); + hSvc->FillHisto(hname, pos2.Perp(), 0.5); hname = "SS2gDt_passDtDphiCog"; hSvc->FillHisto(hname, dt); hname = "SS2gDphi_passDtDphiCog"; @@ -578,15 +629,41 @@ Bool_t EventSelection::ProcessAnalysisSS() hname = "SS2gXYEw_passDtDphiCog"; hSvc->FillHisto2(hname, pos1.x(), pos1.y(), xEne); hSvc->FillHisto2(hname, pos2.x(), pos2.y(), aEne); + + hname = "SS2gEvsR_passDtDphiCog"; + hSvc->FillHisto2(hname, pos1.Perp(), xEne,0.5); + hSvc->FillHisto2(hname, pos2.Perp(), aEne,0.5); + + hname = "SS2gEvsPhi_passDtDphiCog"; + hSvc->FillHisto2(hname, pos1.Phi(), xEne,0.5); + hSvc->FillHisto2(hname, pos2.Phi(), aEne,0.5); // if (fabs(pos1.y())>120. && fabs(pos2.y())>120.) { // if (fabs(pos1.x())>100. && fabs(pos2.x())>100. && fabs(pos1.y())<200. && fabs(pos2.y())<200.) { + // if (pos1.Perp()>125. && pos2.Perp()>125. && pos1.Perp()<290. && pos2.Perp()<290.) { + // if (pos1.Perp()<200. && pos2.Perp()<200.) 
{ + double phicut=0; + if (xEne>aEne) phicut = pos1.Phi(); + else phicut = pos2.Phi(); + // if (fabs(phicut-pigreco/2.)FillHisto(hname, aSumCl); + hname = "SS2gPhi_passDtDphiCogFR"; + hSvc->FillHisto(hname, pos1.Phi(), 0.5); + hSvc->FillHisto(hname, pos2.Phi(), 0.5); + hname = "SS2gR_passDtDphiCogFR"; + hSvc->FillHisto(hname, pos1.Perp(), 0.5); + hSvc->FillHisto(hname, pos2.Perp(), 0.5); hname = "SS2gDt_passDtDphiCogFR"; hSvc->FillHisto(hname, dt); hname = "SS2gDphi_passDtDphiCogFR"; @@ -614,12 +691,25 @@ Bool_t EventSelection::ProcessAnalysisSS() hSvc->FillHisto(hname, pos2.y(), aEne); + hname = "SS2gEvsR_passDtDphiCogFR"; + hSvc->FillHisto2(hname, pos1.Perp(), xEne,0.5); + hSvc->FillHisto2(hname, pos2.Perp(), aEne,0.5); + + hname = "SS2gEvsPhi_passDtDphiCogFR"; + hSvc->FillHisto2(hname, pos1.Phi(), xEne,0.5); + hSvc->FillHisto2(hname, pos2.Phi(), aEne,0.5); if (fabs(aSumCl-490.)<50.) { hname = "SS2gSumE_passDtDphiCogDsume"; hSvc->FillHisto(hname, aSumCl); + hname = "SS2gPhi_passDtDphiCogDsume"; + hSvc->FillHisto(hname, pos1.Phi(), 0.5); + hSvc->FillHisto(hname, pos2.Phi(), 0.5); + hname = "SS2gR_passDtDphiCogDsume"; + hSvc->FillHisto(hname, pos1.Perp(), 0.5); + hSvc->FillHisto(hname, pos2.Perp(), 0.5); hname = "SS2gDt_passDtDphiCogDsume"; hSvc->FillHisto(hname, dt); hname = "SS2gDphi_passDtDphiCogDsume"; @@ -656,9 +746,16 @@ Bool_t EventSelection::ProcessAnalysisSS() hname = "NposInBunch_beam_passDtDphiCogDsume"; hSvc->FillHisto(hname,fTarget_RecoBeam->getnPOT()); + hname = "SS2gEvsR_passDtDphiCogDsume"; + hSvc->FillHisto2(hname, pos1.Perp(), xEne,0.5); + hSvc->FillHisto2(hname, pos2.Perp(), aEne,0.5); + + hname = "SS2gEvsPhi_passDtDphiCogDsume"; + hSvc->FillHisto2(hname, pos1.Phi(), xEne,0.5); + hSvc->FillHisto2(hname, pos2.Phi(), aEne,0.5); } - //} + } } } } @@ -2704,6 +2801,7 @@ Bool_t EventSelection::InitHistosAnalysis() hname="SS2g_ClTime_passDtDphiCogDsume"; hSvc->BookHisto(hname, 100, -500, 500); + hname = "SS2gSumE_passDtDphiCogDsume"; 
hSvc->BookHisto(hname, nBinX, minX, maxX); hname = "SS2gSumE_passDtDphiCogFR"; @@ -2715,6 +2813,64 @@ Bool_t EventSelection::InitHistosAnalysis() hname = "SS2gSumE_passDt"; hSvc->BookHisto(hname, nBinX, minX, maxX); + nBinX=100.; + minX=0.; + maxX=350.; + hname = "SS2gR_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gR_passDtDphiCogFR"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gR_passDtDphiCog"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gR_passDtDphi"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gR_passDt"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + + nBinY=100; + minY=50.; + maxY=450.; + hname = "SS2gEvsR_passDtDphiCogDsume"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + hname = "SS2gEvsR_passDtDphiCogFR"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + hname = "SS2gEvsR_passDtDphiCog"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + hname = "SS2gEvsR_passDtDphi"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + hname = "SS2gEvsR_passDt"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + + + double pigreco = acos(-1.); + nBinY=100; + minY=50.; + maxY=450.; + nBinX=100.; + maxX=pigreco+0.001; + minX = -maxX; + hname = "SS2gEvsPhi_passDtDphiCogDsume"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + hname = "SS2gEvsPhi_passDtDphiCogFR"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + hname = "SS2gEvsPhi_passDtDphiCog"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + hname = "SS2gEvsPhi_passDtDphi"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + hname = "SS2gEvsPhi_passDt"; + hSvc->BookHisto2(hname, nBinX, minX, maxX, nBinY, minY, maxY); + + hname = "SS2gPhi_passDtDphiCogDsume"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gPhi_passDtDphiCogFR"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = 
"SS2gPhi_passDtDphiCog"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gPhi_passDtDphi"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + hname = "SS2gPhi_passDt"; + hSvc->BookHisto(hname, nBinX, minX, maxX); + nBinX= 100; minX = -50; diff --git a/PadmeAnalysis/AnalysisTools/produceSelectionPlots.C b/PadmeAnalysis/AnalysisTools/produceSelectionPlots.C index e7bde8c0..afbfa2b0 100644 --- a/PadmeAnalysis/AnalysisTools/produceSelectionPlots.C +++ b/PadmeAnalysis/AnalysisTools/produceSelectionPlots.C @@ -33,9 +33,20 @@ void produceSelectionPlots(TFile* fData=_file0, TFile* fMC=_file1) xmax = 999; compare("SS2g_clSize", -1, xmin, xmax, xtitle, fData, fMC); + xtitle = "Number of positrons/bunch"; + xmin = 0.; + xmax = 30000.; + compare("NposInBunch_beam", 1, xmin, xmax, xtitle, fData, fMC); + xtitle = "Number of positrons/bunch - SR"; + xmin = 0.; + xmax = 30000.; + compare("NposInBunch_beam_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + + + //return; - xtitle = "E(#gamma_{1}+E(#gamma_{2}) [MeV]"; + xtitle = "E(#gamma_{1})+E(#gamma_{2}) [MeV]"; xmin = 0.; xmax = 1200.; compare("SS2gSumE_passDt", -1, xmin, xmax, xtitle, fData, fMC); @@ -46,6 +57,25 @@ void produceSelectionPlots(TFile* fData=_file0, TFile* fMC=_file1) //compare("SS2gSumE_passDtDphiCogDsume", 0.38, 400., 600, xtitle, fData, fMC); //compare("SS2gSumE_passDtDphiCogDsume", 0.2144, 400., 600, xtitle, fData, fMC); + xtitle = "R(#gamma_{1,2}) [mm]"; + xmin = -999; + xmax = 999; + compare("SS2gR_passDt", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gR_passDtDphi", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gR_passDtDphiCog", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gR_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gR_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); + //compare("SS2gSumE_passDtDphiCogDsume", 0.38, 400., 600, xtitle, fData, fMC); + //compare("SS2gSumE_passDtDphiCogDsume", 0.2144, 400., 600, xtitle, fData, fMC); + + xtitle 
= "#phi(#gamma_{1,2}) [rad]"; + xmin = -999; + xmax = 999; + compare("SS2gPhi_passDt", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gPhi_passDtDphi", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gPhi_passDtDphiCog", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gPhi_passDtDphiCogFR", -1, xmin, xmax, xtitle, fData, fMC); + compare("SS2gPhi_passDtDphiCogDsume", -1, xmin, xmax, xtitle, fData, fMC); xtitle = "#Deltat [ns]"; From 51879e26676be543d07c7b4b3241072cc1c574f2 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Tue, 26 Nov 2019 09:22:46 +0100 Subject: [PATCH 20/64] PadmeDB: added TESTBEAM run type --- PadmeDB/PadmeDAQ_data.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/PadmeDB/PadmeDAQ_data.txt b/PadmeDB/PadmeDAQ_data.txt index db773fa2..a1eecac4 100644 --- a/PadmeDB/PadmeDAQ_data.txt +++ b/PadmeDB/PadmeDAQ_data.txt @@ -96,6 +96,7 @@ run_type 3 CALIBRATION Calibration run run_type 4 RANDOM Random triggers run run_type 5 OTHER Run of a type which was not forseen run_type 6 FAKE Run will use PadmeDAQ in FAKE mode (experts only!) 
+run_type 7 TESTBEAM Run taken during a testbeam # List of process types proc_type 0 ADCDAQ PadmeDAQ Configure an ADC board, collect its data, format them, and send them to ZEROSUP From d87fe42d363ae905f925805548b07ac2ac0b473b Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Tue, 26 Nov 2019 09:23:32 +0100 Subject: [PATCH 21/64] RunControl: modified RunControl for new DB schema --- RunControl/code/ADCBoard.py | 37 ++++++++---- RunControl/code/Level1.py | 14 ++++- RunControl/code/Merger.py | 9 +++ RunControl/code/PadmeDB.py | 92 ++++++++++++++++------------- RunControl/code/Run.py | 10 +--- RunControl/code/RunControlServer.py | 40 ++++++++----- RunControl/code/Trigger.py | 8 +++ 7 files changed, 133 insertions(+), 77 deletions(-) diff --git a/RunControl/code/ADCBoard.py b/RunControl/code/ADCBoard.py index 774ab37c..a1c63667 100644 --- a/RunControl/code/ADCBoard.py +++ b/RunControl/code/ADCBoard.py @@ -172,10 +172,6 @@ def read_setup(self,setup): print l f.close() - #def set_board_id(self,b_id): - # - # self.board_id = b_id - def format_config_daq(self): cfgstring = "" @@ -303,11 +299,14 @@ def print_config(self): def create_proc_daq(self): # Create DAQ process in DB - self.proc_daq_id = self.db.create_daq_process("DAQ",self.run_number,self.get_link_id()) + self.proc_daq_id = self.db.create_daq_process(self.run_number,self.node_id) if self.proc_daq_id == -1: print "ADCBoard::create_proc_daq - ERROR: unable to create new DAQ proces in DB" return "error" + # Add info about optical link + self.db.add_daq_process_optical_link(self.proc_daq_id,self.node_id,self.conet2_link,self.conet2_slot) + self.db.add_cfg_para_daq(self.proc_daq_id,"daq_dir", self.daq_dir) self.db.add_cfg_para_daq(self.proc_daq_id,"ssh_id_file", self.ssh_id_file) self.db.add_cfg_para_daq(self.proc_daq_id,"executable", self.executable) @@ -363,7 +362,7 @@ def create_proc_daq(self): def create_proc_zsup(self): # Create ZSUP process in DB - self.proc_zsup_id = 
self.db.create_daq_process("ZSUP",self.run_number,self.get_link_id()) + self.proc_zsup_id = self.db.create_zsup_process(self.run_number,self.node_id) if self.proc_zsup_id == -1: print "ADCBoard::create_proc_zsup - ERROR: unable to create new ZSUP proces in DB" return "error" @@ -409,11 +408,11 @@ def create_proc_zsup(self): return "ok" - def get_link_id(self): - - # Convert PadmeDAQ link description to link id from DB - if (self.node_id == -1 or self.conet2_link == -1 or self.conet2_slot == -1): return -1 - return self.db.get_link_id(self.node_id,self.conet2_link/8,self.conet2_link%8,self.conet2_slot) + #def get_link_id(self): + # + # # Convert PadmeDAQ link description to link id from DB + # if (self.node_id == -1 or self.conet2_link == -1 or self.conet2_slot == -1): return -1 + # return self.db.get_link_id(self.node_id,self.conet2_link/8,self.conet2_link%8,self.conet2_slot) def start_daq(self): @@ -445,11 +444,19 @@ def start_daq(self): print "ADCBoard::start_daq - ERROR: Execution failed: %s",e return 0 + # Tag start of process in DB + if self.run_number: + self.db.set_process_time_start(self.proc_daq_id) + # Return process id return self.process_daq.pid def stop_daq(self): + # Tag stop process in DB + if self.run_number: + self.db.set_process_time_stop(self.proc_daq_id) + # Wait up to 5 seconds for DAQ to stop of its own (on quit file or on time elapsed) for i in range(10): @@ -523,11 +530,19 @@ def start_zsup(self): print "ADCBoard::start_zsup - ERROR: Execution failed: %s",e return 0 + # Tag start of process in DB + if self.run_number: + self.db.set_process_time_start(self.proc_zsup_id) + # Return process id return self.process_zsup.pid def stop_zsup(self): + # Tag stop process in DB + if self.run_number: + self.db.set_process_time_stop(self.proc_zsup_id) + # Wait up to 5 seconds for ZSUP to stop for i in range(10): diff --git a/RunControl/code/Level1.py b/RunControl/code/Level1.py index 45ff024a..394ebaed 100644 --- a/RunControl/code/Level1.py +++ 
b/RunControl/code/Level1.py @@ -17,6 +17,8 @@ def __init__(self,l_id): # Define id file for passwordless ssh command execution self.ssh_id_file = "%s/.ssh/id_rsa_daq"%os.getenv('HOME',"~") + self.db = PadmeDB() + self.set_default_config() def set_default_config(self): @@ -82,7 +84,7 @@ def print_config(self): def create_level1(self): - self.process_id = self.db.create_level1_process(self.run_number,self.node_id,self.level1_id) + self.process_id = self.db.create_level1_process(self.run_number,self.node_id) if self.process_id == -1: return "error" self.db.add_cfg_para_level1(self.process_id,"daq_dir", self.daq_dir) @@ -94,7 +96,7 @@ def create_level1(self): self.db.add_cfg_para_level1(self.process_id,"node_id", repr(self.node_id)) self.db.add_cfg_para_level1(self.process_id,"node_ip", self.node_ip) - + self.db.add_cfg_para_level1(self.process_id,"config_file", self.config_file) self.db.add_cfg_para_level1(self.process_id,"log_file", self.log_file) @@ -131,11 +133,19 @@ def start_level1(self): print "Level1::start_level1 - ERROR: Execution failed: %s",e return 0 + # Tag start of process in DB + if self.run_number: + self.db.set_process_time_start(self.process_id) + # Return process id return self.process.pid def stop_level1(self): + # Tag stop process in DB + if self.run_number: + self.db.set_process_time_stop(self.process_id) + # Wait up to 5 seconds for Level1 to stop for i in range(5): diff --git a/RunControl/code/Merger.py b/RunControl/code/Merger.py index 871ed293..5bc2958f 100644 --- a/RunControl/code/Merger.py +++ b/RunControl/code/Merger.py @@ -124,11 +124,19 @@ def start_merger(self): print "Merger::start_merger - ERROR: Execution failed: %s",e return 0 + # Tag start of process in DB + if self.run_number: + self.db.set_process_time_start(self.process_id) + # Return process id return self.process.pid def stop_merger(self): + # Tag stop process in DB + if self.run_number: + self.db.set_process_time_stop(self.process_id) + # Wait up to 60 seconds for Merger to 
stop for i in range(60): @@ -147,4 +155,5 @@ def stop_merger(self): if self.process.poll() != None: self.process.wait() self.log_handle.close() + return 0 diff --git a/RunControl/code/PadmeDB.py b/RunControl/code/PadmeDB.py index 94bf1dd3..52b579ca 100644 --- a/RunControl/code/PadmeDB.py +++ b/RunControl/code/PadmeDB.py @@ -73,7 +73,7 @@ def get_last_run_in_db(self): else: return maxrun - def create_run(self,run_nr,run_type,run_user,run_comment): + def create_run(self,run_nr,run_name,run_user,run_type,run_comment): self.check_db() c = self.conn.cursor() @@ -89,7 +89,7 @@ def create_run(self,run_nr,run_type,run_user,run_comment): # Create run try: - c.execute("""INSERT INTO run (number,run_type_id,status,total_events,user) VALUES (%s,%s,%s,%s,%s)""",(run_nr,run_type_id,0,0,run_user)) + c.execute("""INSERT INTO run (number,name,user,run_type_id,status) VALUES (%s,%s,%s,%s,%s)""",(run_nr,run_name,run_user,run_type_id,0)) except MySQLdb.Error as e: print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() @@ -142,37 +142,17 @@ def create_process(self,run_number,proc_type,node_id): return process_id - def create_daq_process(self,mode,run_number,link_id): - - self.check_db() - c = self.conn.cursor() - - # Check if link id exists and get node - node_id = None - c.execute("""SELECT node_id FROM optical_link WHERE id = %s""",(link_id,)) - if c.rowcount == 0: - print "PadmeDB::create_daq_process - WARNING - Unknown optical_link id: %d\n"%link_id - else: - (node_id,) = c.fetchone() + def create_daq_process(self,run_number,node_id): # Create process in database - process_id = -1 - if mode == "DAQ": - process_id = self.create_process(run_number,"ADCDAQ",node_id) - elif mode == "ZSUP": - process_id = self.create_process(run_number,"ZEROSUP",node_id) - else: - print "PADMEDB::create_daq_process - ERROR - Unknown DAQ process mode %s"%mode - return -1 + process_id = self.create_process(run_number,"ADCDAQ",node_id) - if (mode == "DAQ") and (process_id != -1): - # Create 
association between process and optical link for ADCDAQ processes - try: - c.execute("""INSERT INTO daq_link (process_id,optical_link_id) VALUES (%s,%s)""",(process_id,link_id)) - except MySQLdb.Error as e: - print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) + return process_id + + def create_zsup_process(self,run_number,node_id): - self.conn.commit() + # Create process in database + process_id = self.create_process(run_number,"ZEROSUP",node_id) return process_id @@ -190,13 +170,24 @@ def create_merger_process(self,run_number,node_id): return process_id - def create_level1_process(self,run_number,node_id,number): + def create_level1_process(self,run_number,node_id): # Create process in database process_id = self.create_process(run_number,"LEVEL1",node_id) return process_id + def add_daq_process_optical_link(self,proc_id,node_id,conet2_link,conet2_slot): + + link_id = self.get_link_id(node_id,conet2_link/8,conet2_link%8,conet2_slot) + if link_id == -1: + print "PadmeDB::add_daq_process_optical_link - WARNING - Cannot get link for (process,node,link,slot)=(%d,%d,%d,%d)"%(proc_id,node_id,conet2_link,conet2_slot) + else: + try: + c.execute("""INSERT INTO daq_link (process_id,optical_link_id) VALUES (%s,%s)""",(proc_id,link_id)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) + def set_run_status(self,run_nr,status): self.check_db() @@ -207,32 +198,52 @@ def set_run_status(self,run_nr,status): print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() - def set_run_time_init(self,run_nr,time_init): + def set_run_time_init(self,run_nr): self.check_db() c = self.conn.cursor() try: - c.execute("""UPDATE run SET time_init = %s WHERE number = %s""",(time_init,run_nr)) + c.execute("""UPDATE run SET time_init = %s WHERE number = %s""",(self.now_str(),run_nr)) except MySQLdb.Error as e: print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() - def set_run_time_start(self,run_nr,time_start): + def set_run_time_start(self,run_nr): 
self.check_db() c = self.conn.cursor() try: - c.execute("""UPDATE run SET time_start = %s WHERE number = %s""",(time_start,run_nr)) + c.execute("""UPDATE run SET time_start = %s WHERE number = %s""",(self.now_str(),run_nr)) except MySQLdb.Error as e: print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() - def set_run_time_stop(self,run_nr,time_stop): + def set_run_time_stop(self,run_nr): self.check_db() c = self.conn.cursor() try: - c.execute("""UPDATE run SET time_stop = %s WHERE number = %s""",(time_stop,run_nr)) + c.execute("""UPDATE run SET time_stop = %s WHERE number = %s""",(self.now_str(),run_nr)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) + self.conn.commit() + + def set_process_time_start(self,proc_id): + + self.check_db() + c = self.conn.cursor() + try: + c.execute("""UPDATE process SET time_start = %s WHERE id = %s""",(self.now_str(),proc_id)) + except MySQLdb.Error as e: + print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) + self.conn.commit() + + def set_process_time_stop(self,proc_id): + + self.check_db() + c = self.conn.cursor() + try: + c.execute("""UPDATE process SET time_stop = %s WHERE id = %s""",(self.now_str(),proc_id)) except MySQLdb.Error as e: print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() @@ -303,9 +314,6 @@ def get_para_id(self,para_name): return para_id - def now_str(self): - return time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime()) - def get_node_id(self,node): # Return DB id of node with given name/ip address (from DAQ VLAN) @@ -417,8 +425,7 @@ def get_link_id(self,node_id,controller_id,channel_id,slot_id): self.check_db() c = self.conn.cursor() - c.execute("""SELECT id FROM optical_link WHERE node_id=%s AND controller_id=%s AND channel_id=%s AND slot_id=%s""", - (node_id,controller_id,channel_id,slot_id)) + c.execute("""SELECT id FROM optical_link WHERE node_id=%s AND controller_id=%s AND channel_id=%s AND slot_id=%s""",(node_id,controller_id,channel_id,slot_id)) ret = 
c.fetchone() if ret != None: (link_id,) = ret @@ -443,3 +450,6 @@ def get_merger_final_info(self,merger_proc_id): self.conn.commit() return (tot_evts,tot_size) + + def now_str(self): + return time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime()) diff --git a/RunControl/code/Run.py b/RunControl/code/Run.py index c8e5b977..3e00079e 100644 --- a/RunControl/code/Run.py +++ b/RunControl/code/Run.py @@ -70,10 +70,7 @@ def change_run(self): #print "--- Changing run" - #if (self.run_number == 0): - # self.run_name = "run_0_"+time.strftime("%Y%m%d_%H%M%S",time.gmtime()) - #else: - # self.run_name = "run_%d"%self.run_number + # Define run name using run number and start time self.run_name = "run_%7.7d_%s"%(self.run_number,time.strftime("%Y%m%d_%H%M%S",time.gmtime())) # Write run name to current_run file for monitoring @@ -319,11 +316,11 @@ def format_config(self): return cfgstring - def create_run(self): + def create_run_in_db(self): # Create run in DB and save its configuration parameters - self.db.create_run(self.run_number,self.run_type,self.run_user,self.run_comment_start) + self.db.create_run(self.run_number,self.run_name,self.run_user,self.run_type,self.run_comment_start) self.db.add_cfg_para_run(self.run_number,"user_account", self.user_account) self.db.add_cfg_para_run(self.run_number,"daq_dir", self.daq_dir) @@ -376,7 +373,6 @@ def create_run(self): self.db.add_cfg_para_run(self.run_number,"rawdata_head", self.rawdata_head) self.db.add_cfg_para_run(self.run_number,"trigger_node", self.trigger_node) - #self.db.add_cfg_para_run(self.run_number,"trigger_mask", self.trigger_mask) if self.merger_node: self.db.add_cfg_para_run(self.run_number,"merger_node", self.merger_node) diff --git a/RunControl/code/RunControlServer.py b/RunControl/code/RunControlServer.py index c284d535..df3a4d5a 100644 --- a/RunControl/code/RunControlServer.py +++ b/RunControl/code/RunControlServer.py @@ -618,7 +618,6 @@ def new_run(self): elif (ans=="client_close"): return "client_close" else: - 
#self.write_log("run_number - invalid option %s received"%ans) print "run_number - invalid option %s received"%ans self.send_answer("error") return "error" @@ -645,7 +644,6 @@ def new_run(self): return "error" self.send_answer(newrun_type) else: - #self.write_log("run_type - invalid option %s received"%ans) print "run_type - invalid option %s received"%ans self.send_answer("error") return "error" @@ -687,12 +685,12 @@ def new_run(self): return "error" print "Creating Run %d strucure in DB"%self.run.run_number - if self.run.create_run() == "error": + if self.run.create_run_in_db() == "error": print "ERROR - Cannot create Run in the DB" return "error" # Save time when run initialization starts - self.db.set_run_time_init(self.run.run_number,self.now_str()) + self.db.set_run_time_init(self.run.run_number) # Create directory to host log files print "Creating log directory %s"%self.run.log_dir @@ -833,14 +831,16 @@ def new_run(self): if (all_boards_ready): print "All boards completed initialization: DAQ run can be started" - if (self.run.run_number): self.db.set_run_status(self.run.run_number,1) # Status 1: run correctly initialized + if (self.run.run_number): + self.db.set_run_status(self.run.run_number,1) # Status 1: run correctly initialized self.send_answer("init_ready") return "initialized" else: print "*** ERROR *** One or more boards failed the initialization. Cannot start run" - if (self.run.run_number): self.db.set_run_status(self.run.run_number,5) # Status 5: run with problems at initialization + if (self.run.run_number): + self.db.set_run_status(self.run.run_number,5) # Status 5: run with problems at initialization self.send_answer("init_fail") return "initfail" @@ -848,7 +848,8 @@ def new_run(self): n_try += 1 if (n_try>=60): print "*** ERROR *** One or more boards did not initialize within 30sec. 
Cannot start run" - if (self.run.run_number): self.db.set_run_status(self.run.run_number,5) # Status 5: run with problems at initialization + if (self.run.run_number): + self.db.set_run_status(self.run.run_number,5) # Status 5: run with problems at initialization self.send_answer("init_timeout") return "initfail" time.sleep(0.5) @@ -864,7 +865,7 @@ def start_run(self): # Update run status in DB if (self.run.run_number): - self.db.set_run_time_start(self.run.run_number,self.now_str()) + self.db.set_run_time_start(self.run.run_number) self.db.set_run_status(self.run.run_number,2) # Status 2: run started self.send_answer("run_started") @@ -881,7 +882,8 @@ def stop_run(self): self.run.run_comment_end = ans print "Stopping run" - if (self.run.run_number): self.db.set_run_status(self.run.run_number,3) # Status 3: run stopped normally + if (self.run.run_number): + self.db.set_run_status(self.run.run_number,3) # Status 3: run stopped normally return self.terminate_run() @@ -890,14 +892,15 @@ def abort_run(self): self.run.run_comment_end = "Run aborted" print "Aborting run" - if (self.run.run_number): self.db.set_run_status(self.run.run_number,4) # Status 4: run aborted + if (self.run.run_number): + self.db.set_run_status(self.run.run_number,4) # Status 4: run aborted return self.terminate_run() def terminate_run(self): if (self.run.run_number): - self.db.set_run_time_stop(self.run.run_number,self.now_str()) + self.db.set_run_time_stop(self.run.run_number) self.db.set_run_comment_end(self.run.run_number,self.run.run_comment_end) ## Create "stop the run" tag file @@ -916,7 +919,8 @@ def terminate_run(self): terminate_ok = False self.send_answer("adc %d daq_terminate_error"%adc.board_id) print "ADC board %02d - WARNING: problems while terminating DAQ"%adc.board_id - if (self.run.run_number): self.db.set_run_status(self.run.run_number,6) # Status 6: run ended with errors + if (self.run.run_number): + self.db.set_run_status(self.run.run_number,6) # Status 6: run ended with 
errors # Run stop_zsup procedure for each ADC board for adc in (self.run.adcboard_list): @@ -927,7 +931,8 @@ def terminate_run(self): terminate_ok = False self.send_answer("adc %d zsup_terminate_error"%adc.board_id) print "ADC board %02d - WARNING: problems while terminating ZSUP"%adc.board_id - if (self.run.run_number): self.db.set_run_status(self.run.run_number,6) # Status 6: run ended with errors + if (self.run.run_number): + self.db.set_run_status(self.run.run_number,6) # Status 6: run ended with errors # If this is a real run, get final info from merger before stopping it if (self.run.run_number): @@ -942,7 +947,8 @@ def terminate_run(self): terminate_ok = False self.send_answer("trigger terminate_error") print "WARNING: problems while terminating Trigger" - if (self.run.run_number): self.db.set_run_status(self.run.run_number,6) # Status 6: run ended with errors + if (self.run.run_number): + self.db.set_run_status(self.run.run_number,6) # Status 6: run ended with errors # Run stop_merger procedure if self.run.merger.stop_merger(): @@ -952,7 +958,8 @@ def terminate_run(self): terminate_ok = False self.send_answer("merger terminate_error") print "WARNING: problems while terminating Merger" - if (self.run.run_number): self.db.set_run_status(self.run.run_number,6) # Status 6: run ended with errors + if (self.run.run_number): + self.db.set_run_status(self.run.run_number,6) # Status 6: run ended with errors # Run stop_level1 procedures for lvl1 in self.run.level1_list: @@ -963,7 +970,8 @@ def terminate_run(self): terminate_ok = False self.send_answer("level1 %d terminate_error"%lvl1.level1_id) print "Level1 %02d - WARNING: problems while terminating"%lvl1.level1_id - if (self.run.run_number): self.db.set_run_status(self.run.run_number,6) # Status 6: run ended with errors + if (self.run.run_number): + self.db.set_run_status(self.run.run_number,6) # Status 6: run ended with errors # Clean up run directory self.run.clean_up() diff --git a/RunControl/code/Trigger.py 
b/RunControl/code/Trigger.py index 5c8e9be3..c6a6d9bf 100644 --- a/RunControl/code/Trigger.py +++ b/RunControl/code/Trigger.py @@ -319,11 +319,19 @@ def start_trig(self): print "Trigger::start_trig - ERROR: Execution failed: %s",e return 0 + # Tag start of process in DB + if self.run_number: + self.db.set_process_time_start(self.process_id) + # Return process id return self.process.pid def stop_trig(self): + # Tag stop process in DB + if self.run_number: + self.db.set_process_time_stop(self.process_id) + # Wait up to 5 seconds for DAQ to stop of its own (on quit file or on time elapsed) for i in range(10): From 6b255447a864a23ad3b96b0a864942d4ebdbc140 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Tue, 26 Nov 2019 12:43:10 +0100 Subject: [PATCH 22/64] PadmeTrig: modified PadmeTrig for new DB schema --- PadmeTrig/src/Config.c | 122 +++++++++++++------- PadmeTrig/src/PadmeTrig.c | 237 ++++++++++++++++++++++---------------- 2 files changed, 219 insertions(+), 140 deletions(-) diff --git a/PadmeTrig/src/Config.c b/PadmeTrig/src/Config.c index 64cffa0f..696011ac 100644 --- a/PadmeTrig/src/Config.c +++ b/PadmeTrig/src/Config.c @@ -37,13 +37,13 @@ int reset_config() strcpy(Config->config_file,""); - strcpy(Config->quit_file,"run/quit"); strcpy(Config->start_file,"run/start"); - strcpy(Config->initok_file,"run/initok.b00"); // InitOK file for default board 0 - strcpy(Config->initfail_file,"run/initfail.b00"); // InitFail file for default board 0 - strcpy(Config->lock_file,"run/lock.b00"); // Lock file for default board 0 + strcpy(Config->quit_file,"run/quit"); + strcpy(Config->initok_file,"run/initok.trigger"); + strcpy(Config->initfail_file,"run/initfail.trigger"); + strcpy(Config->lock_file,"run/lock.trigger"); - Config->run_number = 0; // Dummy run (no DB access) + Config->run_number = 0; // Dummy run Config->trigger_mask = 0x01; // Only BTF trigger active @@ -539,7 +539,7 @@ int print_config(){ printf("output_mode\t\t%s\t\toutput mode (FILE or 
STREAM)\n",Config->output_mode); if (strcmp(Config->output_mode,"STREAM")==0) { - printf("output_stream\t\t%s\tname of virtual file used as output stream\n",Config->output_stream); + printf("output_stream\t\t'%s'\tname of virtual file used as output stream\n",Config->output_stream); } else { printf("data_dir\t\t'%s'\t\tdirectory where output files will be stored\n",Config->data_dir); printf("data_file\t\t'%s'\ttemplate name for data files: string will be appended\n",Config->data_file); @@ -565,61 +565,105 @@ int print_config(){ } -/* // Save configuration parameters to DB int save_config() { - int i; - char line[2048]; - - db_add_cfg_para(Config->process_id,"config_file",Config->config_file); - - db_add_cfg_para(Config->process_id,"start_file",Config->start_file); - db_add_cfg_para(Config->process_id,"quit_file",Config->quit_file); - - db_add_cfg_para(Config->process_id,"initok_file",Config->initok_file); - db_add_cfg_para(Config->process_id,"initfail_file",Config->initfail_file); - db_add_cfg_para(Config->process_id,"lock_file",Config->lock_file); - - sprintf(line,"%d",Config->run_number); - db_add_cfg_para(Config->process_id,"run_number",line); - - db_add_cfg_para(Config->process_id,"output_mode",Config->output_mode); + //int i; + //char line[2048]; + + + //db_add_cfg_para(Config->process_id,"config_file",Config->config_file); + printf("DBINFO - add_proc_config_para %d %s %s\n",Config->process_id,"config_file",Config->config_file); + + //db_add_cfg_para(Config->process_id,"start_file",Config->start_file); + //db_add_cfg_para(Config->process_id,"quit_file",Config->quit_file); + //db_add_cfg_para(Config->process_id,"initok_file",Config->initok_file); + //db_add_cfg_para(Config->process_id,"initfail_file",Config->initfail_file); + //db_add_cfg_para(Config->process_id,"lock_file",Config->lock_file); + printf("DBINFO - add_proc_config_para %d %s %s\n",Config->process_id,"start_file",Config->start_file); + printf("DBINFO - add_proc_config_para %d %s 
%s\n",Config->process_id,"quit_file",Config->quit_file); + printf("DBINFO - add_proc_config_para %d %s %s\n",Config->process_id,"initok_file",Config->initok_file); + printf("DBINFO - add_proc_config_para %d %s %s\n",Config->process_id,"initfail_file",Config->initfail_file); + printf("DBINFO - add_proc_config_para %d %s %s\n",Config->process_id,"lock_file",Config->lock_file); + + //sprintf(line,"%d",Config->run_number); + //db_add_cfg_para(Config->process_id,"run_number",line); + //printf("DBINFO - add_proc_config_para %d %s %d\n",Config->process_id,"run_number",Config->run_number); + + printf("DBINFO - add_proc_config_para %d %s 0x%02x\n",Config->process_id,"trigger_mask",Config->trigger_mask); + printf("DBINFO - add_proc_config_para %d %s 0x%02x\n",Config->process_id,"busy_mask",Config->busy_mask); + + printf("DBINFO - add_proc_config_para %d %s 0x%02x\n",Config->process_id,"timepix_shutter_delay",Config->timepix_shutter_delay); + printf("DBINFO - add_proc_config_para %d %s 0x%02x\n",Config->process_id,"timepix_shutter_width",Config->timepix_shutter_width); + + printf("DBINFO - add_proc_config_para %d %s 0x%02x\n",Config->process_id,"trigger0_delay",Config->trigger0_delay); + + printf("DBINFO - add_proc_config_para %d %s 0x%04x\n",Config->process_id,"correlated_trigger_delay",Config->correlated_trigger_delay); + + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig1_scale_global",Config->trig1_scale_global); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig2_scale_global",Config->trig2_scale_global); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig3_scale_global",Config->trig3_scale_global); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig4_scale_global",Config->trig4_scale_global); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig5_scale_global",Config->trig5_scale_global); + printf("DBINFO - add_proc_config_para %d %s 
%u\n",Config->process_id,"trig6_scale_global",Config->trig6_scale_global); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig7_scale_global",Config->trig7_scale_global); + + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig0_scale_autopass",Config->trig0_scale_autopass); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig1_scale_autopass",Config->trig1_scale_autopass); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig2_scale_autopass",Config->trig2_scale_autopass); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig3_scale_autopass",Config->trig3_scale_autopass); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig4_scale_autopass",Config->trig4_scale_autopass); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig5_scale_autopass",Config->trig5_scale_autopass); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig6_scale_autopass",Config->trig6_scale_autopass); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"trig7_scale_autopass",Config->trig7_scale_autopass); + + //db_add_cfg_para(Config->process_id,"output_mode",Config->output_mode); + printf("DBINFO - add_proc_config_para %d %s %s\n",Config->process_id,"output_mode",Config->output_mode); if (strcmp(Config->output_mode,"STREAM")==0) { - db_add_cfg_para(Config->process_id,"output_stream",Config->output_stream); + //db_add_cfg_para(Config->process_id,"output_stream",Config->output_stream); + printf("DBINFO - add_proc_config_para %d %s %s\n",Config->process_id,"output_stream",Config->output_stream); } else { - db_add_cfg_para(Config->process_id,"data_dir",Config->data_dir); - db_add_cfg_para(Config->process_id,"data_file",Config->data_file); + //db_add_cfg_para(Config->process_id,"data_dir",Config->data_dir); + //db_add_cfg_para(Config->process_id,"data_file",Config->data_file); + printf("DBINFO - 
add_proc_config_para %d %s %s\n",Config->process_id,"data_dir",Config->data_dir); + printf("DBINFO - add_proc_config_para %d %s %s\n",Config->process_id,"data_file",Config->data_file); } - sprintf(line,"%d",Config->node_id); - db_add_cfg_para(Config->process_id,"node_id",line); + //sprintf(line,"%d",Config->node_id); + //db_add_cfg_para(Config->process_id,"node_id",line); + //printf("DBINFO - add_proc_config_para %d %s %d\n",Config->process_id,"node_id",Config->node_id); - sprintf(line,"%d",Config->total_daq_time); - db_add_cfg_para(Config->process_id,"total_daq_time",line); + //sprintf(line,"%d",Config->total_daq_time); + //db_add_cfg_para(Config->process_id,"total_daq_time",line); + printf("DBINFO - add_proc_config_para %d %s %d\n",Config->process_id,"total_daq_time",Config->total_daq_time); - sprintf(line,"%u",Config->daq_loop_delay); - db_add_cfg_para(Config->process_id,"daq_loop_delay",line); + //sprintf(line,"%u",Config->daq_loop_delay); + //db_add_cfg_para(Config->process_id,"daq_loop_delay",line); + printf("DBINFO - add_proc_config_para %d %s %d\n",Config->process_id,"daq_loop_delay",Config->daq_loop_delay); // In STREAM mode the output file never changes if (strcmp(Config->output_mode,"FILE")==0) { - sprintf(line,"%u",Config->file_max_duration); - db_add_cfg_para(Config->process_id,"file_max_duration",line); + //sprintf(line,"%u",Config->file_max_duration); + //db_add_cfg_para(Config->process_id,"file_max_duration",line); + printf("DBINFO - add_proc_config_para %d %s %d\n",Config->process_id,"file_max_duration",Config->file_max_duration); - sprintf(line,"%lu",Config->file_max_size); - db_add_cfg_para(Config->process_id,"file_max_size",line); + //sprintf(line,"%lu",Config->file_max_size); + //db_add_cfg_para(Config->process_id,"file_max_size",line); + printf("DBINFO - add_proc_config_para %d %s %lu\n",Config->process_id,"file_max_size",Config->file_max_size); - sprintf(line,"%u",Config->file_max_events); - 
db_add_cfg_para(Config->process_id,"file_max_events",line); + //sprintf(line,"%u",Config->file_max_events); + //db_add_cfg_para(Config->process_id,"file_max_events",line); + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"file_max_events",Config->file_max_events); } + printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"debug_scale",Config->debug_scale); + return 0; } -*/ int end_config() { diff --git a/PadmeTrig/src/PadmeTrig.c b/PadmeTrig/src/PadmeTrig.c index 4d04ce39..d8b4d230 100644 --- a/PadmeTrig/src/PadmeTrig.c +++ b/PadmeTrig/src/PadmeTrig.c @@ -171,31 +171,32 @@ int create_initfail_file() void proc_finalize(int error,int rmv_lock,int create_file,int update_db,int status) { if (create_file) create_initfail_file(); - if (update_db && Config->run_number) { + //if (update_db && Config->run_number) { if (status == DB_STATUS_IDLE) { - printf("- Setting process status to IDLE (%d) in DB\n",DB_STATUS_IDLE); + printf("- Setting process status to IDLE (%d)\n",DB_STATUS_IDLE); } else if (status == DB_STATUS_INITIALIZING) { - printf("- Setting process status to INITIALIZING (%d) in DB\n",DB_STATUS_INITIALIZING); + printf("- Setting process status to INITIALIZING (%d)\n",DB_STATUS_INITIALIZING); } else if (status == DB_STATUS_INIT_FAIL) { - printf("- Setting process status to INIT_FAIL (%d) in DB\n",DB_STATUS_INIT_FAIL); + printf("- Setting process status to INIT_FAIL (%d)\n",DB_STATUS_INIT_FAIL); } else if (status == DB_STATUS_INITIALIZED) { - printf("- Setting process status to INITIALIZED (%d) in DB\n",DB_STATUS_INITIALIZED); + printf("- Setting process status to INITIALIZED (%d)\n",DB_STATUS_INITIALIZED); } else if (status == DB_STATUS_ABORTED) { - printf("- Setting process status to ABORTED (%d) in DB\n",DB_STATUS_ABORTED); + printf("- Setting process status to ABORTED (%d)\n",DB_STATUS_ABORTED); } else if (status == DB_STATUS_RUNNING) { - printf("- Setting process status to RUNNING (%d) in DB\n",DB_STATUS_RUNNING); + 
printf("- Setting process status to RUNNING (%d)\n",DB_STATUS_RUNNING); } else if (status == DB_STATUS_RUN_FAIL) { - printf("- Setting process status to RUN_FAIL (%d) in DB\n",DB_STATUS_RUN_FAIL); + printf("- Setting process status to RUN_FAIL (%d)\n",DB_STATUS_RUN_FAIL); } else if (status == DB_STATUS_FINISHED) { - printf("- Setting process status to FINISHED (%d) in DB\n",DB_STATUS_FINISHED); + printf("- Setting process status to FINISHED (%d)\n",DB_STATUS_FINISHED); } else if (status == DB_STATUS_CLOSE_FAIL) { - printf("- Setting process status to CLOSE_FAIL (%d) in DB\n",DB_STATUS_CLOSE_FAIL); + printf("- Setting process status to CLOSE_FAIL (%d)\n",DB_STATUS_CLOSE_FAIL); } else { - printf("- Setting process status to UNKNOWN (%d) in DB\n",DB_STATUS_UNKNOWN); + printf("- Setting process status to UNKNOWN (%d)\n",DB_STATUS_UNKNOWN); status = DB_STATUS_UNKNOWN; } - db_process_set_status(Config->process_id,status); - } + //db_process_set_status(Config->process_id,status); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,status); + //} if (rmv_lock) remove_lock(); if (error) exit(1); exit(0); @@ -228,8 +229,8 @@ int main(int argc, char *argv[]) { pid_t pid; int c; - int rc; - unsigned char mask[4]; + //int rc; + //unsigned char mask[4]; unsigned char val1; unsigned short int val2; //unsigned int val4; @@ -286,16 +287,11 @@ int main(int argc, char *argv[]) { unsigned int i,p; - int trigger = -1; // Used to blindly set a trigger mask when -t option is specified + //int trigger = -1; // Used to blindly set a trigger mask when -t option is specified // Use line buffering for stdout setlinebuf(stdout); - // Show welcome message - printf("===========================================\n"); - printf("=== Welcome to the PADME Trigger system ===\n"); - printf("===========================================\n"); - // Initialize run configuration if ( init_config() ) { printf("*** ERROR *** Problem initializing run configuration.\n"); @@ -318,21 +314,22 @@ int 
main(int argc, char *argv[]) { exit(1); } break; - case 't': - trigger = atoi(optarg); - break; + //case 't': + // trigger = atoi(optarg); + // break; case 'h': - fprintf(stdout,"\nPadmeTrig [-c cfg_file] [-t mask] [-h]\n\n"); + //fprintf(stdout,"\nPadmeTrig [-c cfg_file] [-t mask] [-h]\n\n"); + fprintf(stdout,"\nPadmeTrig [-c cfg_file] [-h]\n\n"); fprintf(stdout," -c: use file 'cfg_file' to set configuration parameters for this process\n"); fprintf(stdout," If no file is specified, use default settings\n"); - fprintf(stdout," -t: set trigger mask and exit (default mask: 0x00)\n"); + //fprintf(stdout," -t: set trigger mask and exit (default mask: 0x00)\n"); fprintf(stdout," -h: show this help message and exit\n\n"); exit(0); case '?': if (optopt == 'c') fprintf (stderr, "Option -%c requires an argument.\n", optopt); - else if (optopt == 't') - trigger = 0; + //else if (optopt == 't') + // trigger = 0; else if (isprint (optopt)) fprintf (stderr, "Unknown option '-%c'.\n", optopt); else @@ -342,6 +339,12 @@ int main(int argc, char *argv[]) { abort (); } + // Show welcome message + printf("===========================================\n"); + printf("=== Welcome to the PADME Trigger system ===\n"); + printf("===========================================\n"); + + /* // If a trigger mask was explicitily specified, just set it and exit if (trigger != -1) { @@ -377,65 +380,71 @@ int main(int argc, char *argv[]) { exit(0); } + */ // Show configuration print_config(); + // Save configuration to DB + save_config(); + // Check if another PadmeTrig program is running printf("\n=== Verifying that no other PadmeTrig instances are running ===\n"); pid = create_lock(); if (pid > 0) { printf("*** ERROR *** Another PadmeTrig is running with PID %d. Exiting.\n",pid); - proc_finalize(1,0,1,0,0); + proc_finalize(1,0,1,0,DB_STATUS_INIT_FAIL); } else if (pid < 0) { printf("*** ERROR *** Problems while creating lock file '%s'. 
Exiting.\n",Config->lock_file); - proc_finalize(1,0,1,0,0); + proc_finalize(1,0,1,0,DB_STATUS_INIT_FAIL); } - if ( Config->run_number ) { - - // Connect to DB - if ( db_init() != DB_OK ) { - printf("*** ERROR *** Unable to initialize DB connection. Exiting.\n"); - proc_finalize(1,1,1,0,0); - } - - // Verify if run number is valid - rc = db_run_check(Config->run_number); - if ( rc != 1 ) { - if ( rc < 0 ) { - printf("ERROR: DB check for run number %d returned an error\n",Config->run_number); - } else if ( rc == 0 ) { - printf("ERROR: run number %d does not exist in the DB\n",Config->run_number); - } - proc_finalize(1,1,1,0,0); - } - - // Verify if process id is valid - rc = db_process_check(Config->process_id); - if ( rc < 0 ) { - printf("ERROR: DB check for process id %d returned an error\n",Config->process_id); - proc_finalize(1,1,1,0,0); - } else if ( rc == 0 ) { - printf("ERROR: process id %d does not exist in DB\n",Config->process_id); - proc_finalize(1,1,1,0,0); - } else if ( rc > 1 ) { - printf("ERROR: multiple copies of process id %d found in DB\n",Config->process_id); - proc_finalize(1,1,1,0,0); - } - int status = db_process_get_status(Config->process_id); - if (status!=DB_STATUS_IDLE) { - printf("ERROR: process id %d is not in IDLE (%d) status (status=%d)\n",Config->process_id,DB_STATUS_IDLE,status); - proc_finalize(1,1,1,0,0); - } - - } + //if ( Config->run_number ) { + // + // // Connect to DB + // if ( db_init() != DB_OK ) { + // printf("*** ERROR *** Unable to initialize DB connection. 
Exiting.\n"); + // proc_finalize(1,1,1,0,0); + // } + // + // // Verify if run number is valid + // rc = db_run_check(Config->run_number); + // if ( rc != 1 ) { + // if ( rc < 0 ) { + // printf("ERROR: DB check for run number %d returned an error\n",Config->run_number); + // } else if ( rc == 0 ) { + // printf("ERROR: run number %d does not exist in the DB\n",Config->run_number); + // } + // proc_finalize(1,1,1,0,0); + // } + // + // // Verify if process id is valid + // rc = db_process_check(Config->process_id); + // if ( rc < 0 ) { + // printf("ERROR: DB check for process id %d returned an error\n",Config->process_id); + // proc_finalize(1,1,1,0,0); + // } else if ( rc == 0 ) { + // printf("ERROR: process id %d does not exist in DB\n",Config->process_id); + // proc_finalize(1,1,1,0,0); + // } else if ( rc > 1 ) { + // printf("ERROR: multiple copies of process id %d found in DB\n",Config->process_id); + // proc_finalize(1,1,1,0,0); + // } + // int status = db_process_get_status(Config->process_id); + // if (status!=DB_STATUS_IDLE) { + // printf("ERROR: process id %d is not in IDLE (%d) status (status=%d)\n",Config->process_id,DB_STATUS_IDLE,status); + // proc_finalize(1,1,1,0,0); + // } + // + //} // Update process status - if (Config->run_number) { - printf("- Setting process status to INITIALIZING (%d) in DB\n",DB_STATUS_INITIALIZING); - db_process_set_status(Config->process_id,DB_STATUS_INITIALIZING); - } + //if (Config->run_number) { + // printf("- Setting process status to INITIALIZING (%d) in DB\n",DB_STATUS_INITIALIZING); + // db_process_set_status(Config->process_id,DB_STATUS_INITIALIZING); + //} + printf("- Setting process status to INITIALIZING (%d)\n",DB_STATUS_INITIALIZING); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_INITIALIZING); // Allocate output event buffer (max among file header, trigger event, file tail) maxPEvtSize = 16; // Header 12, Event 12, Tail 16 @@ -668,10 +677,12 @@ int main(int argc, char *argv[]) { // 
Initialization is now finished: create InitOK file to tell RunControl we are ready. printf("- Trigger board initialized: waiting for start_file '%s'\n",Config->start_file); - if (Config->run_number) { - printf("- Setting process status to INITIALIZED (%d) in DB\n",DB_STATUS_INITIALIZED); - db_process_set_status(Config->process_id,DB_STATUS_INITIALIZED); - } + //if (Config->run_number) { + // printf("- Setting process status to INITIALIZED (%d) in DB\n",DB_STATUS_INITIALIZED); + // db_process_set_status(Config->process_id,DB_STATUS_INITIALIZED); + //} + printf("- Setting process status to INITIALIZED (%d)\n",DB_STATUS_INITIALIZED); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_INITIALIZED); if ( create_initok_file() ) { printf("PadmeTrig *** ERROR *** Problem while creating InitOK file. Exiting.\n"); proc_finalize(1,1,1,1,DB_STATUS_INIT_FAIL); @@ -702,15 +713,18 @@ int main(int argc, char *argv[]) { printf("%s - Starting trigger generation\n",format_time(t_daqstart)); //old_sys_time = t_daqstart; - if ( Config->run_number ) { - // Tell DB that the process has started - printf("- Setting process status to RUNNING (%d) in DB\n",DB_STATUS_RUNNING); - db_process_set_status(Config->process_id,DB_STATUS_RUNNING); - if ( db_process_open(Config->process_id,t_daqstart) != DB_OK ) { - printf("PadmeTrig *** ERROR *** Unable to open process in DB. Exiting.\n"); - proc_finalize(1,1,0,1,DB_STATUS_RUN_FAIL); - } - } + //if ( Config->run_number ) { + // // Tell DB that the process has started + // printf("- Setting process status to RUNNING (%d) in DB\n",DB_STATUS_RUNNING); + // db_process_set_status(Config->process_id,DB_STATUS_RUNNING); + // if ( db_process_open(Config->process_id,t_daqstart) != DB_OK ) { + // printf("PadmeTrig *** ERROR *** Unable to open process in DB. 
Exiting.\n"); + // proc_finalize(1,1,0,1,DB_STATUS_RUN_FAIL); + // } + //} + printf("- Setting process status to RUNNING (%d)\n",DB_STATUS_RUNNING); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_RUNNING); + printf("DBINFO - process_set_time_start %d %s\n",Config->process_id,format_time(t_daqstart)); // Zero counters totalReadSize = 0; @@ -718,6 +732,11 @@ int main(int argc, char *argv[]) { totalWriteSize = 0; totalWriteEvents = 0; + // Initialize file counters + fileTOpen[fileIndex] = t_daqstart; + fileSize[fileIndex] = 0; + fileEvents[fileIndex] = 0; + // When using FILE output, only open file when DAQ has started if ( strcmp(Config->output_mode,"FILE")==0 ) { @@ -737,12 +756,10 @@ int main(int argc, char *argv[]) { proc_finalize(1,1,0,1,DB_STATUS_RUN_FAIL); } - } + printf("DBINFO - file_create %s %s %d %d %d\n",fileName[fileIndex],"TRIGDATA",PEVT_CURRENT_VERSION,Config->process_id,fileIndex); + printf("DBINFO - file_set_time_open %s %s\n",fileName[fileIndex],format_time(fileTOpen[fileIndex])); - // Initialize file counters - fileTOpen[fileIndex] = t_daqstart; - fileSize[fileIndex] = 0; - fileEvents[fileIndex] = 0; + } /* // Start counting output files @@ -921,6 +938,9 @@ int main(int argc, char *argv[]) { format_time(fileTClose[fileIndex]),pathName[fileIndex], (int)(fileTClose[fileIndex]-fileTOpen[fileIndex]), fileEvents[fileIndex],fileSize[fileIndex]); + printf("DBINFO - file_set_time_close %s %s\n",fileName[fileIndex],format_time(fileTClose[fileIndex])); + printf("DBINFO - file_set_n_events %s %u\n",fileName[fileIndex],fileEvents[fileIndex]); + printf("DBINFO - file_set_size %s %lu\n",fileName[fileIndex],fileSize[fileIndex]); // Update file counter fileIndex++; @@ -944,6 +964,9 @@ int main(int argc, char *argv[]) { fileSize[fileIndex] = 0; fileEvents[fileIndex] = 0; + printf("DBINFO - file_create %s %s %d %d %d\n",fileName[fileIndex],"TRIGDATA",PEVT_CURRENT_VERSION,Config->process_id,fileIndex); + printf("DBINFO - 
file_set_time_open %s %s\n",fileName[fileIndex],format_time(fileTOpen[fileIndex])); + // Write header to file fHeadSize = create_file_head(fileIndex,Config->run_number,fileTOpen[fileIndex],(void *)outEvtBuffer); writeSize = write(fileHandle,outEvtBuffer,fHeadSize); @@ -1008,11 +1031,17 @@ int main(int argc, char *argv[]) { }; if ( strcmp(Config->output_mode,"FILE")==0 ) { printf("%s - Closed output file '%s' after %d secs with %u events and size %lu bytes\n", - format_time(t_now),pathName[fileIndex],(int)(fileTClose[fileIndex]-fileTOpen[fileIndex]), + format_time(fileTClose[fileIndex]),pathName[fileIndex], + (int)(fileTClose[fileIndex]-fileTOpen[fileIndex]), fileEvents[fileIndex],fileSize[fileIndex]); + printf("DBINFO - file_set_time_close %s %s\n",fileName[fileIndex],format_time(t_now)); + printf("DBINFO - file_set_n_events %s %u\n",fileName[fileIndex],fileEvents[fileIndex]); + printf("DBINFO - file_set_size %s %lu\n",fileName[fileIndex],fileSize[fileIndex]); + } else { printf("%s - Closed output stream '%s' after %d secs with %u events and size %lu bytes\n", - format_time(t_now),pathName[fileIndex],(int)(fileTClose[fileIndex]-fileTOpen[fileIndex]), + format_time(fileTClose[fileIndex]),pathName[fileIndex], + (int)(fileTClose[fileIndex]-fileTOpen[fileIndex]), fileEvents[fileIndex],fileSize[fileIndex]); } @@ -1050,14 +1079,20 @@ int main(int argc, char *argv[]) { } // Tell DB that the process has ended - if ( Config->run_number ) { - if ( db_process_close(Config->process_id,t_daqstop,totalWriteSize,totalWriteEvents) != DB_OK ) { - printf("*** ERROR *** Problem while closing process in DB. 
Exiting.\n"); - proc_finalize(1,1,0,0,0); - } - printf("- Setting process status to FINISHED (%d) in DB\n",DB_STATUS_FINISHED); - db_process_set_status(Config->process_id,DB_STATUS_FINISHED); - } + //if ( Config->run_number ) { + // if ( db_process_close(Config->process_id,t_daqstop,totalWriteSize,totalWriteEvents) != DB_OK ) { + // printf("*** ERROR *** Problem while closing process in DB. Exiting.\n"); + // proc_finalize(1,1,0,0,0); + // } + // printf("- Setting process status to FINISHED (%d) in DB\n",DB_STATUS_FINISHED); + // db_process_set_status(Config->process_id,DB_STATUS_FINISHED); + //} + printf("- Setting process status to FINISHED (%d)\n",DB_STATUS_FINISHED); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); + printf("DBINFO - process_set_time_stop %d %s\n",Config->process_id,format_time(t_daqstop)); + printf("DBINFO - process_set_n_files %d %d\n",Config->process_id,fileIndex); + printf("DBINFO - process_set_total_events %d %d\n",Config->process_id,totalWriteEvents); + printf("DBINFO - process_set_total_size %d %ld\n",Config->process_id,totalWriteSize); // Give some final report evtReadPerSec = 0.; @@ -1078,7 +1113,7 @@ int main(int argc, char *argv[]) { printf("Total number of events written: %u - %6.2f events/s\n",totalWriteEvents,evtWritePerSec); printf("Total size of data written: %lu B - %6.2f KB/s\n",totalWriteSize,sizeWritePerSec); if ( strcmp(Config->output_mode,"FILE")==0 ) { - printf("=== Files created =======================================\n"); + printf("=== %2d files created =====================================\n",fileIndex); for(i=0;i Date: Tue, 26 Nov 2019 17:05:46 +0100 Subject: [PATCH 23/64] PadmeDB: small changes to DB schema; RunControl: change process handlers for new schema --- PadmeDB/PadmeDAQ_schema.sql | 8 +- RunControl/code/ADCBoard.py | 160 ++++++++++++++++++++---------------- RunControl/code/Level1.py | 32 +++----- RunControl/code/Merger.py | 41 ++++----- RunControl/code/PadmeDB.py | 8 +- 
RunControl/code/Trigger.py | 153 +++++++++++++++++++++------------- 6 files changed, 224 insertions(+), 178 deletions(-) diff --git a/PadmeDB/PadmeDAQ_schema.sql b/PadmeDB/PadmeDAQ_schema.sql index c2344c8b..3d84303d 100644 --- a/PadmeDB/PadmeDAQ_schema.sql +++ b/PadmeDB/PadmeDAQ_schema.sql @@ -1,5 +1,5 @@ -- MySQL Script generated by MySQL Workbench --- 11/22/19 09:39:01 +-- 11/26/19 16:59:26 -- Model: New Model Version: 1.0 -- MySQL Workbench Forward Engineering @@ -35,15 +35,17 @@ ENGINE = InnoDB; -- ----------------------------------------------------- CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`run` ( `number` INT NOT NULL, + `name` VARCHAR(255) NULL, + `user` VARCHAR(1024) NULL, `run_type_id` INT NOT NULL, `status` INT NOT NULL, `time_init` DATETIME NULL, `time_start` DATETIME NULL, `time_stop` DATETIME NULL, `total_events` INT UNSIGNED NULL, - `user` VARCHAR(1024) NULL, PRIMARY KEY (`number`), INDEX `fk_run_run_type1_idx` (`run_type_id` ASC), + UNIQUE INDEX `name_UNIQUE` (`name` ASC), CONSTRAINT `fk_run_run_type1` FOREIGN KEY (`run_type_id`) REFERENCES `PadmeDAQ`.`run_type` (`id`) @@ -233,8 +235,10 @@ CREATE TABLE IF NOT EXISTS `PadmeDAQ`.`process` ( `process_type_id` INT NOT NULL, `node_id` INT NOT NULL, `status` INT NOT NULL, + `time_create` DATETIME NULL, `time_start` DATETIME NULL, `time_stop` DATETIME NULL, + `time_end` DATETIME NULL, `n_files` INT UNSIGNED NULL, `total_events` INT UNSIGNED NULL, `total_size` BIGINT UNSIGNED NULL, diff --git a/RunControl/code/ADCBoard.py b/RunControl/code/ADCBoard.py index a1c63667..587b905d 100644 --- a/RunControl/code/ADCBoard.py +++ b/RunControl/code/ADCBoard.py @@ -35,25 +35,34 @@ def set_default_config(self): self.executable = os.getenv('PADME',".")+"/PadmeDAQ/PadmeDAQ.exe" self.run_number = 0 + self.process_id = -1 self.process_mode = "DAQ" self.config_file_daq = "unset" self.log_file_daq = "unset" self.lock_file_daq = "unset" - self.output_stream_daq = "unset" self.initok_file_daq = "unset" self.initfail_file_daq = 
"unset" + self.output_mode_daq = "STREAM" + self.output_stream_daq = "unset" + self.data_dir_daq = "unset" + self.data_file_daq = "daq" + self.config_file_zsup = "unset" self.log_file_zsup = "unset" self.lock_file_zsup = "unset" - self.output_mode = "STREAM" - self.input_stream_zsup = "unset" - self.output_stream_zsup = "unset" self.initok_file_zsup = "unset" self.initfail_file_zsup = "unset" + self.input_stream_zsup = "unset" + + self.output_mode = "STREAM" + self.output_stream_zsup = "unset" + self.data_dir_zsup = "unset" + self.data_file_zsup = "zsup" + self.start_file = "unset" self.quit_file = "unset" @@ -77,13 +86,6 @@ def set_default_config(self): self.auto_threshold = int('0x0400',0) self.auto_duration = 150 - # Default DAQ control parameters - self.daq_loop_delay = 100000 - self.debug_scale = 100 - self.file_max_duration = 900 - self.file_max_size = 1024*1024*1024 - self.file_max_events = 100000 - # Default zero suppression settings self.zero_suppression = 0 @@ -98,6 +100,15 @@ def set_default_config(self): self.zs2_minrms_ch = [] for ch in range(32): self.zs2_minrms_ch.append(self.zs2_minrms) + # Default file parameters + self.file_max_duration = 3600 + self.file_max_size = 1024*1024*1024 + self.file_max_events = 1000*1000 + + # Default DAQ control parameters + self.daq_loop_delay = 10000 + self.debug_scale = 100 + def read_setup(self,setup): if (self.board_id == -1): @@ -197,8 +208,15 @@ def format_config_daq(self): cfgstring += "initok_file\t\t%s\n"%self.initok_file_daq cfgstring += "initfail_file\t\t%s\n"%self.initfail_file_daq - cfgstring += "output_mode\t\t%s\n"%self.output_mode - cfgstring += "output_stream\t\t%s\n"%self.output_stream_daq + cfgstring += "output_mode\t\t\t%s\n"%self.output_mode_daq + if self.output_mode_daq == "STREAM": + cfgstring += "output_stream\t\t\t%s\n"%self.output_stream_daq + elif self.output_mode_daq == "FILE": + cfgstring += "data_dir\t\t\t%s\n"%self.data_dir_daq + cfgstring += "data_file\t\t\t%s\n"%self.data_file_daq + 
cfgstring += "file_max_duration\t\t%d\n"%self.file_max_duration + cfgstring += "file_max_size\t\t\t%d\n"%self.file_max_size + cfgstring += "file_max_events\t\t%d\n"%self.file_max_events cfgstring += "total_daq_time\t\t%d\n"%self.total_daq_time @@ -220,12 +238,13 @@ def format_config_daq(self): cfgstring += "drs4corr_enable\t\t%d\n"%self.drs4corr_enable cfgstring += "drs4_sampfreq\t\t%d\n"%self.drs4_sampfreq - cfgstring += "daq_loop_delay\t\t%d\n"%self.daq_loop_delay - cfgstring += "debug_scale\t\t\t%d\n"%self.debug_scale - cfgstring += "auto_threshold\t\t%#04x\n"%self.auto_threshold cfgstring += "auto_duration\t\t%d\n"%self.auto_duration + cfgstring += "daq_loop_delay\t\t%d\n"%self.daq_loop_delay + + cfgstring += "debug_scale\t\t\t%d\n"%self.debug_scale + return cfgstring def format_config_zsup(self): @@ -242,8 +261,8 @@ def format_config_zsup(self): cfgstring += "node_id\t\t\t%d\n"%self.node_id cfgstring += "node_ip\t\t\t%s\n"%self.node_ip - cfgstring += "conet2_link\t\t%d\n"%self.conet2_link - cfgstring += "conet2_slot\t\t%d\n"%self.conet2_slot + #cfgstring += "conet2_link\t\t%d\n"%self.conet2_link + #cfgstring += "conet2_slot\t\t%d\n"%self.conet2_slot cfgstring += "config_file\t\t%s\n"%self.config_file_zsup cfgstring += "log_file\t\t%s\n"%self.log_file_zsup @@ -251,8 +270,16 @@ def format_config_zsup(self): cfgstring += "initok_file\t\t%s\n"%self.initok_file_zsup cfgstring += "initfail_file\t\t%s\n"%self.initfail_file_zsup - cfgstring += "output_mode\t\t%s\n"%self.output_mode - cfgstring += "output_stream\t\t%s\n"%self.output_stream_zsup + cfgstring += "output_mode\t\t\t%s\n"%self.output_mode_zsup + if self.output_mode_zsup == "STREAM": + cfgstring += "output_stream\t\t\t%s\n"%self.output_stream_zsup + elif self.output_mode_zsup == "FILE": + cfgstring += "data_dir\t\t\t%s\n"%self.data_dir_zsup + cfgstring += "data_file\t\t\t%s\n"%self.data_file_zsup + cfgstring += "file_max_duration\t\t%d\n"%self.file_max_duration + cfgstring += 
"file_max_size\t\t\t%d\n"%self.file_max_size + cfgstring += "file_max_events\t\t%d\n"%self.file_max_events + cfgstring += "input_stream\t\t%s\n"%self.input_stream_zsup cfgstring += "zero_suppression\t%d\n"%self.zero_suppression @@ -301,7 +328,7 @@ def create_proc_daq(self): # Create DAQ process in DB self.proc_daq_id = self.db.create_daq_process(self.run_number,self.node_id) if self.proc_daq_id == -1: - print "ADCBoard::create_proc_daq - ERROR: unable to create new DAQ proces in DB" + print "ADCBoard::create_proc_daq - ERROR: unable to create new DAQ process in DB" return "error" # Add info about optical link @@ -328,9 +355,16 @@ def create_proc_daq(self): self.db.add_cfg_para_daq(self.proc_daq_id,"initok_file", self.initok_file_daq) self.db.add_cfg_para_daq(self.proc_daq_id,"initfail_file", self.initfail_file_daq) - self.db.add_cfg_para_daq(self.proc_daq_id,"output_mode", self.output_mode) - self.db.add_cfg_para_daq(self.proc_daq_id,"output_stream", self.output_stream_daq) - + self.db.add_cfg_para_daq(self.proc_daq_id,"output_mode", self.output_mode_daq) + if self.output_mode_daq == "STREAM": + self.db.add_cfg_para_daq(self.proc_daq_id,"output_stream", self.output_stream_daq) + elif self.output_mode_daq == "FILE": + self.db.add_cfg_para_trigger(self.proc_daq_id,"data_dir", self.data_dir_daq) + self.db.add_cfg_para_trigger(self.proc_daq_id,"data_file", self.data_file_daq) + self.db.add_cfg_para_trigger(self.proc_daq_id,"file_max_duration", self.file_max_duration) + self.db.add_cfg_para_trigger(self.proc_daq_id,"file_max_size", self.file_max_size) + self.db.add_cfg_para_trigger(self.proc_daq_id,"file_max_events", self.file_max_events) + self.db.add_cfg_para_daq(self.proc_daq_id,"total_daq_time", repr(self.total_daq_time)) self.db.add_cfg_para_daq(self.proc_daq_id,"startdaq_mode", repr(self.startdaq_mode)) @@ -351,12 +385,13 @@ def create_proc_daq(self): self.db.add_cfg_para_daq(self.proc_daq_id,"drs4corr_enable", repr(self.drs4corr_enable)) 
self.db.add_cfg_para_daq(self.proc_daq_id,"drs4_sampfreq", repr(self.drs4_sampfreq)) - self.db.add_cfg_para_daq(self.proc_daq_id,"daq_loop_delay", repr(self.daq_loop_delay)) - self.db.add_cfg_para_daq(self.proc_daq_id,"debug_scale", repr(self.debug_scale)) - self.db.add_cfg_para_daq(self.proc_daq_id,"auto_threshold", "%#04x"%self.auto_threshold) self.db.add_cfg_para_daq(self.proc_daq_id,"auto_duration", repr(self.auto_duration)) + self.db.add_cfg_para_daq(self.proc_daq_id,"daq_loop_delay", repr(self.daq_loop_delay)) + + self.db.add_cfg_para_daq(self.proc_daq_id,"debug_scale", repr(self.debug_scale)) + return "ok" def create_proc_zsup(self): @@ -377,8 +412,6 @@ def create_proc_zsup(self): self.db.add_cfg_para_daq(self.proc_zsup_id,"node_id", repr(self.node_id)) self.db.add_cfg_para_daq(self.proc_zsup_id,"node_ip", self.node_ip) - self.db.add_cfg_para_daq(self.proc_zsup_id,"conet2_link", repr(self.conet2_link)) - self.db.add_cfg_para_daq(self.proc_zsup_id,"conet2_slot", repr(self.conet2_slot)) self.db.add_cfg_para_daq(self.proc_zsup_id,"config_file", self.config_file_zsup) self.db.add_cfg_para_daq(self.proc_zsup_id,"log_file", self.log_file_zsup) @@ -386,9 +419,17 @@ def create_proc_zsup(self): self.db.add_cfg_para_daq(self.proc_zsup_id,"initok_file", self.initok_file_zsup) self.db.add_cfg_para_daq(self.proc_zsup_id,"initfail_file", self.initfail_file_zsup) - self.db.add_cfg_para_daq(self.proc_zsup_id,"output_mode", self.output_mode) - self.db.add_cfg_para_daq(self.proc_zsup_id,"output_stream", self.output_stream_zsup) - self.db.add_cfg_para_daq(self.proc_zsup_id,"input_stream", self.input_stream_zsup) + self.db.add_cfg_para_daq(self.proc_zsup_id,"output_mode", self.output_mode_zsup) + if self.output_mode_zsup == "STREAM": + self.db.add_cfg_para_daq(self.proc_zsup_id,"output_stream", self.output_stream_zsup) + elif self.output_mode_zsup == "FILE": + self.db.add_cfg_para_trigger(self.proc_zsup_id,"data_dir", self.data_dir_zsup) + 
self.db.add_cfg_para_trigger(self.proc_zsup_id,"data_file", self.data_file_zsup) + self.db.add_cfg_para_trigger(self.proc_zsup_id,"file_max_duration", self.file_max_duration) + self.db.add_cfg_para_trigger(self.proc_zsup_id,"file_max_size", self.file_max_size) + self.db.add_cfg_para_trigger(self.proc_zsup_id,"file_max_events", self.file_max_events) + + self.db.add_cfg_para_daq(self.proc_zsup_id,"input_stream", self.input_stream_zsup) self.db.add_cfg_para_daq(self.proc_zsup_id,"zero_suppression", repr(self.zero_suppression)) if (self.zero_suppression%100 == 1): @@ -429,47 +470,33 @@ def start_daq(self): # Open log file self.log_handle_daq = open(self.log_file_daq,"w") - # Start DAQ process - #try: - # self.process_daq = subprocess.Popen([self.executable,"-c",self.config_file_daq],stdout=self.log_handle_daq,stderr=subprocess.STDOUT,bufsize=1) - #except OSError as e: - # print "ADCBoard - ERROR: DAQ Execution failed: %s",e - # return 0 - # Start DAQ process try: - #self.process_daq = subprocess.Popen(command.split(),stdout=self.log_handle_daq,stderr=subprocess.STDOUT,bufsize=1) self.process_daq = subprocess.Popen(shlex.split(command),stdout=self.log_handle_daq,stderr=subprocess.STDOUT,bufsize=1) except OSError as e: print "ADCBoard::start_daq - ERROR: Execution failed: %s",e return 0 # Tag start of process in DB - if self.run_number: - self.db.set_process_time_start(self.proc_daq_id) + if self.run_number: self.db.set_process_time_create(self.proc_daq_id) # Return process id return self.process_daq.pid def stop_daq(self): - # Tag stop process in DB - if self.run_number: - self.db.set_process_time_stop(self.proc_daq_id) - # Wait up to 5 seconds for DAQ to stop of its own (on quit file or on time elapsed) for i in range(10): - if self.process_daq.poll() != None: - # Process exited: clean up defunct process and close log file self.process_daq.wait() self.log_handle_daq.close() - return 1 - + if self.run_number: self.db.set_process_time_end(self.proc_daq_id) + return True 
time.sleep(0.5) # Process did not stop: try sending an interrupt + print "ADCBoard::stop_daq - WARNING: DAQ process did not stop on its own. Sending interrupt" if self.node_id == 0: # If process is on local host, just send a kill signal command = "kill %d"%self.process_daq.pid @@ -482,23 +509,23 @@ # Wait up to 5 seconds for DAQ to stop on interrupt for i in range(10): - if self.process_daq.poll() != None: - # Process exited: clean up defunct process and close log file self.process_daq.wait() self.log_handle_daq.close() - return 1 - + if self.run_number: self.db.set_process_time_end(self.proc_daq_id) + return True time.sleep(0.5) # Process did not stop smoothly: terminate it + print "ADCBoard::stop_daq - WARNING: DAQ process did not stop on interrupt. Terminating it" self.process_daq.terminate() time.sleep(1) if self.process_daq.poll() != None: self.process_daq.wait() self.log_handle_daq.close() - return 0 + if self.run_number: self.db.set_process_time_end(self.proc_daq_id) + return False def start_zsup(self): @@ -515,24 +542,15 @@ # Open log file self.log_handle_zsup = open(self.log_file_zsup,"w") - # Start ZSUP process - #try: - # self.process_zsup = subprocess.Popen([self.executable,"-c",self.config_file_zsup],stdout=self.log_handle_zsup,stderr=subprocess.STDOUT,bufsize=1) - #except OSError as e: - # print "ADCBoard - ERROR: ZSUP execution failed: %s",e - # return 0 - # Start ZSUP process try: - #self.process_zsup = subprocess.Popen(command.split(),stdout=self.log_handle_zsup,stderr=subprocess.STDOUT,bufsize=1) self.process_zsup = subprocess.Popen(shlex.split(command),stdout=self.log_handle_zsup,stderr=subprocess.STDOUT,bufsize=1) except OSError as e: print "ADCBoard::start_zsup - ERROR: Execution failed: %s",e return 0 # Tag start of process in DB - if self.run_number: - self.db.set_process_time_start(self.proc_zsup_id) + if self.run_number: self.db.set_process_time_create(self.proc_zsup_id) # Return process id return
self.process_zsup.pid @@ -540,25 +558,23 @@ def start_zsup(self): def stop_zsup(self): # Tag stop process in DB - if self.run_number: - self.db.set_process_time_stop(self.proc_zsup_id) # Wait up to 5 seconds for ZSUP to stop for i in range(10): - if self.process_zsup.poll() != None: - # Process exited: clean up defunct process and close log file self.process_zsup.wait() self.log_handle_zsup.close() - return 1 - + if self.run_number: self.db.set_process_time_end(self.proc_zsup_id) + return True time.sleep(0.5) # Process did not stop smoothly: terminate it + print "ADCBoard::stop_zsup - WARNING: ZSUP process did not stop on interrupt. Terminating it" self.process_zsup.terminate() time.sleep(1) if self.process_zsup.poll() != None: self.process_zsup.wait() self.log_handle_zsup.close() - return 0 + if self.run_number: self.db.set_process_time_end(self.proc_zsup_id) + return False diff --git a/RunControl/code/Level1.py b/RunControl/code/Level1.py index 394ebaed..9a5c97db 100644 --- a/RunControl/code/Level1.py +++ b/RunControl/code/Level1.py @@ -29,14 +29,13 @@ def set_default_config(self): self.executable = os.getenv('PADME',".")+"/Level1/PadmeLevel1.exe" self.run_number = 0 + self.process_id = -1 self.max_events = 10000 self.config_file = "unset" self.log_file = "unset" - self.output_mode = "STREAM" - self.input_stream = "unset" self.output_dir = "unset" self.output_header = "unset" @@ -58,8 +57,6 @@ def format_config(self): cfgstring += "config_file\t\t%s\n"%self.config_file cfgstring += "log_file\t\t%s\n"%self.log_file - cfgstring += "output_mode\t\t%s\n"%self.output_mode - cfgstring += "input_stream\t\t%s\n"%self.input_stream cfgstring += "output_dir\t\t%s\n"%self.output_dir cfgstring += "output_header\t\t%s\n"%self.output_header @@ -85,23 +82,23 @@ def print_config(self): def create_level1(self): self.process_id = self.db.create_level1_process(self.run_number,self.node_id) - if self.process_id == -1: return "error" + if self.process_id == -1: + print 
"Level1::create_level1 - ERROR: unable to create new Level1 process in DB" + return "error" self.db.add_cfg_para_level1(self.process_id,"daq_dir", self.daq_dir) self.db.add_cfg_para_level1(self.process_id,"ssh_id_file", self.ssh_id_file) self.db.add_cfg_para_level1(self.process_id,"executable", self.executable) - self.db.add_cfg_para_level1(self.process_id,"run_number", repr(self.run_number)) + #self.db.add_cfg_para_level1(self.process_id,"run_number", repr(self.run_number)) self.db.add_cfg_para_level1(self.process_id,"level1_id", repr(self.level1_id)) - self.db.add_cfg_para_level1(self.process_id,"node_id", repr(self.node_id)) + #self.db.add_cfg_para_level1(self.process_id,"node_id", repr(self.node_id)) self.db.add_cfg_para_level1(self.process_id,"node_ip", self.node_ip) self.db.add_cfg_para_level1(self.process_id,"config_file", self.config_file) self.db.add_cfg_para_level1(self.process_id,"log_file", self.log_file) - self.db.add_cfg_para_level1(self.process_id,"output_mode", self.output_mode) - self.db.add_cfg_para_level1(self.process_id,"input_stream", self.input_stream) self.db.add_cfg_para_level1(self.process_id,"output_dir", self.output_dir) self.db.add_cfg_para_level1(self.process_id,"output_header",self.output_header) @@ -127,15 +124,13 @@ def start_level1(self): # Start Level1 process try: - #self.process = subprocess.Popen(command.split(),stdout=self.log_handle,stderr=subprocess.STDOUT,bufsize=1) self.process = subprocess.Popen(shlex.split(command),stdout=self.log_handle,stderr=subprocess.STDOUT,bufsize=1) except OSError as e: print "Level1::start_level1 - ERROR: Execution failed: %s",e return 0 # Tag start of process in DB - if self.run_number: - self.db.set_process_time_start(self.process_id) + if self.run_number: self.db.set_process_time_create(self.process_id) # Return process id return self.process.pid @@ -143,25 +138,24 @@ def start_level1(self): def stop_level1(self): # Tag stop process in DB - if self.run_number: - 
self.db.set_process_time_stop(self.process_id) # Wait up to 5 seconds for Level1 to stop for i in range(5): - if self.process.poll() != None: - # Process exited: clean up defunct process and close log file self.process.wait() self.log_handle.close() - return 1 - + if self.run_number: self.db.set_process_time_end(self.process_id) + return True time.sleep(1) # Process did not stop smoothly: stop it + print "Level1::stop_level1 - WARNING: Level1 process did not stop on its own. Terminating it" self.process.terminate() time.sleep(1) if self.process.poll() != None: self.process.wait() self.log_handle.close() - return 0 + + if self.run_number: self.db.set_process_time_end(self.process_id) + return False diff --git a/RunControl/code/Merger.py b/RunControl/code/Merger.py index 5bc2958f..3f5bb137 100644 --- a/RunControl/code/Merger.py +++ b/RunControl/code/Merger.py @@ -29,12 +29,12 @@ def set_default_config(self): self.executable = os.getenv('PADME',".")+"/Level1/PadmeMerger.exe" self.run_number = 0 + self.process_id = -1 self.config_file = "undefined" - self.log_file = "undefined" - self.output_mode = "STREAM" + #self.output_mode = "STREAM" self.input_list = "undefined" self.output_list = "undefined" @@ -55,7 +55,7 @@ def format_config(self): cfgstring += "config_file\t\t%s\n"%self.config_file cfgstring += "log_file\t\t%s\n"%self.log_file - cfgstring += "output_mode\t\t%s\n"%self.output_mode + #cfgstring += "output_mode\t\t%s\n"%self.output_mode cfgstring += "input_list\t\t%s\n"%self.input_list cfgstring += "output_list\t\t%s\n"%self.output_list @@ -79,21 +79,23 @@ def print_config(self): def create_merger(self): self.process_id = self.db.create_merger_process(self.run_number,self.node_id) - if self.process_id == -1: return "error" + if self.process_id == -1: + print "Merger::create_merger - ERROR: unable to create new Merger process in DB" + return "error" self.db.add_cfg_para_merger(self.process_id,"daq_dir", self.daq_dir)
self.db.add_cfg_para_merger(self.process_id,"ssh_id_file",self.ssh_id_file) self.db.add_cfg_para_merger(self.process_id,"executable", self.executable) - self.db.add_cfg_para_merger(self.process_id,"run_number", repr(self.run_number)) + #self.db.add_cfg_para_merger(self.process_id,"run_number", repr(self.run_number)) - self.db.add_cfg_para_merger(self.process_id,"node_id", repr(self.node_id)) + #self.db.add_cfg_para_merger(self.process_id,"node_id", repr(self.node_id)) self.db.add_cfg_para_merger(self.process_id,"node_ip", self.node_ip) self.db.add_cfg_para_merger(self.process_id,"config_file",self.config_file) self.db.add_cfg_para_merger(self.process_id,"log_file", self.log_file) - self.db.add_cfg_para_merger(self.process_id,"output_mode",self.output_mode) + #self.db.add_cfg_para_merger(self.process_id,"output_mode",self.output_mode) self.db.add_cfg_para_merger(self.process_id,"input_list", self.input_list) self.db.add_cfg_para_merger(self.process_id,"output_list",self.output_list) @@ -104,13 +106,11 @@ def start_merger(self): command = "%s -r %d -i %s -o %s"%(self.executable,self.run_number,self.input_list,self.output_list) - # Check if Merger runs on a remote node (node_id 0 is localhost) + # If Merger process runs on a remote node then start it using passwordless ssh connection if self.node_id != 0: - - # Start Merger on remote node using passwordless ssh connection command = "ssh -i %s %s '( %s )'"%(self.ssh_id_file,self.node_ip,command) - print "- Starting Merger" + print "- Starting Merger process" print command print " Log written to %s"%self.log_file @@ -125,35 +125,30 @@ def start_merger(self): return 0 # Tag start of process in DB - if self.run_number: - self.db.set_process_time_start(self.process_id) + if self.run_number: self.db.set_process_time_create(self.process_id) # Return process id return self.process.pid def stop_merger(self): - # Tag stop process in DB - if self.run_number: - self.db.set_process_time_stop(self.process_id) - # Wait up to 60 
seconds for Merger to stop for i in range(60): - if self.process.poll() != None: - # Process exited: clean up defunct process and close log file self.process.wait() self.log_handle.close() - return 1 - + if self.run_number: self.db.set_process_time_end(self.process_id) + return True time.sleep(1) - # Process did not stop smoothly: stop it + # Process did not stop smoothly: terminate it + print "Merger::stop_merger - WARNING: Merger process did not stop on its own. Terminating it" self.process.terminate() time.sleep(1) if self.process.poll() != None: self.process.wait() self.log_handle.close() - return 0 + if self.run_number: self.db.set_process_time_end(self.process_id) + return False diff --git a/RunControl/code/PadmeDB.py b/RunControl/code/PadmeDB.py index 52b579ca..453c5277 100644 --- a/RunControl/code/PadmeDB.py +++ b/RunControl/code/PadmeDB.py @@ -228,22 +228,22 @@ def set_run_time_stop(self,run_nr): print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() - def set_process_time_start(self,proc_id): + def set_process_time_create(self,proc_id): self.check_db() c = self.conn.cursor() try: - c.execute("""UPDATE process SET time_start = %s WHERE id = %s""",(self.now_str(),proc_id)) + c.execute("""UPDATE process SET time_create = %s WHERE id = %s""",(self.now_str(),proc_id)) except MySQLdb.Error as e: print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() - def set_process_time_stop(self,proc_id): + def set_process_time_end(self,proc_id): self.check_db() c = self.conn.cursor() try: - c.execute("""UPDATE process SET time_stop = %s WHERE id = %s""",(self.now_str(),proc_id)) + c.execute("""UPDATE process SET time_end = %s WHERE id = %s""",(self.now_str(),proc_id)) except MySQLdb.Error as e: print "MySQL Error:%d:%s"%(e.args[0],e.args[1]) self.conn.commit() diff --git a/RunControl/code/Trigger.py b/RunControl/code/Trigger.py index c6a6d9bf..edd8adc7 100644 --- a/RunControl/code/Trigger.py +++ b/RunControl/code/Trigger.py @@ -31,8 +31,9 @@ def 
set_default_config(self): self.executable = os.getenv('PADME',".")+"/PadmeTrig/PadmeTrig.exe" self.run_number = 0 + self.process_id = -1 - self.process_mode = "DAQ" + #self.process_mode = "DAQ" self.config_file = "unset" @@ -44,6 +45,9 @@ def set_default_config(self): self.output_mode = "STREAM" self.output_stream = "unset" + self.data_dir = "unset" + self.data_file = "trig" + self.start_file = "unset" self.quit_file = "unset" @@ -56,7 +60,7 @@ def set_default_config(self): self.busy_mask = int('0x10',0) # BTF trigger settings (get all triggers, no autopass) - self.trig0_scale_global = 1 + #self.trig0_scale_global = 1 self.trig0_scale_autopass = 0 # External triggers settings (get all triggers, no autopass) @@ -76,12 +80,22 @@ def set_default_config(self): self.trig6_scale_autopass = 1 self.trig7_scale_global = 1 self.trig7_scale_autopass = 1 + + # Delay for trigger 0 (BTF) + self.trigger0_delay = int('0x7e',0) + + # Delay of correlated trigger wrt trigger 0 self.correlated_trigger_delay = int('0x01f4 ',0) # Default timepix settings self.timepix_shutter_delay = int('0x02',0) self.timepix_shutter_width = int('0xff',0) + # Default file parameters + self.file_max_duration = 3600 + self.file_max_size = 1024*1024*1024 + self.file_max_events = 1000*1000 + # Default DAQ control parameters self.daq_loop_delay = 10000 self.debug_scale = 100 @@ -125,6 +139,7 @@ def read_setup(self,setup): elif (p_name == "trig6_scale_autopass"): self.trig6_scale_autopass = int(p_value,0) elif (p_name == "trig7_scale_global"): self.trig7_scale_global = int(p_value,0) elif (p_name == "trig7_scale_autopass"): self.trig7_scale_autopass = int(p_value,0) + elif (p_name == "trigger0_delay"): self.trigger0_delay = int(p_value,0) elif (p_name == "correlated_trigger_delay"): self.correlated_trigger_delay = int(p_value,0) elif (p_name == "timepix_shutter_delay"): self.timepix_shutter_delay = int(p_value,0) elif (p_name == "timepix_shutter_width"): self.timepix_shutter_width = int(p_value,0) @@ -147,7 
+162,7 @@ def format_config(self): cfgstring += "quit_file\t\t\t%s\n"%self.quit_file cfgstring += "run_number\t\t\t%d\n"%self.run_number - cfgstring += "process_mode\t\t\t%s\n"%self.process_mode + #cfgstring += "process_mode\t\t\t%s\n"%self.process_mode if (self.run_number): cfgstring += "process_id\t\t\t%d\n"%self.process_id cfgstring += "node_id\t\t\t\t%d\n"%self.node_id @@ -160,7 +175,14 @@ def format_config(self): cfgstring += "initfail_file\t\t\t%s\n"%self.initfail_file cfgstring += "output_mode\t\t\t%s\n"%self.output_mode - cfgstring += "output_stream\t\t\t%s\n"%self.output_stream + if self.output_mode == "STREAM": + cfgstring += "output_stream\t\t\t%s\n"%self.output_stream + elif self.output_mode == "FILE": + cfgstring += "data_dir\t\t\t%s\n"%self.data_dir + cfgstring += "data_file\t\t\t%s\n"%self.data_file + cfgstring += "file_max_duration\t\t%d\n"%self.file_max_duration + cfgstring += "file_max_size\t\t\t%d\n"%self.file_max_size + cfgstring += "file_max_events\t\t%d\n"%self.file_max_events cfgstring += "total_daq_time\t\t\t%d\n"%self.total_daq_time @@ -170,10 +192,12 @@ def format_config(self): cfgstring += "trigger_mask\t\t\t0x%02x\n"%self.trigger_mask cfgstring += "busy_mask\t\t\t0x%02x\n"%self.busy_mask + cfgstring += "trigger0_delay\t0x%02x\n"%self.trigger0_delay + cfgstring += "correlated_trigger_delay\t0x%04x\n"%self.correlated_trigger_delay if (self.trigger_mask & 0x01): - cfgstring += "trig0_scale_global\t\t%d\n"%self.trig0_scale_global + #cfgstring += "trig0_scale_global\t\t%d\n"%self.trig0_scale_global cfgstring += "trig0_scale_autopass\t\t%d\n"%self.trig0_scale_autopass if (self.trigger_mask & 0x02): @@ -208,6 +232,7 @@ def format_config(self): cfgstring += "timepix_shutter_width\t\t0x%02x\n"%self.timepix_shutter_width cfgstring += "daq_loop_delay\t\t\t%d\n"%self.daq_loop_delay + cfgstring += "debug_scale\t\t\t%d\n"%self.debug_scale return cfgstring @@ -230,8 +255,8 @@ def create_trigger(self): # Create Trigger process in DB self.process_id = 
self.db.create_trigger_process(self.run_number,self.node_id) - if self.proc_daq_id == -1: - print "ADCBoard::create_proc_daq - ERROR: unable to create new DAQ proces in DB" + if self.process_id == -1: + print "Trigger::create_trigger - ERROR: unable to create new Trigger process in DB" return "error" self.db.add_cfg_para_trigger(self.process_id,"daq_dir", self.daq_dir) @@ -240,10 +265,10 @@ def create_trigger(self): self.db.add_cfg_para_trigger(self.process_id,"start_file", self.start_file) self.db.add_cfg_para_trigger(self.process_id,"quit_file", self.quit_file) - self.db.add_cfg_para_trigger(self.process_id,"run_number", repr(self.run_number)) - self.db.add_cfg_para_trigger(self.process_id,"process_mode", self.process_mode) + #self.db.add_cfg_para_trigger(self.process_id,"run_number", repr(self.run_number)) + #self.db.add_cfg_para_trigger(self.process_id,"process_mode", self.process_mode) - self.db.add_cfg_para_trigger(self.process_id,"node_id", repr(self.node_id)) + #self.db.add_cfg_para_trigger(self.process_id,"node_id", repr(self.node_id)) self.db.add_cfg_para_trigger(self.process_id,"node_ip", self.node_ip) self.db.add_cfg_para_trigger(self.process_id,"config_file", self.config_file) @@ -251,49 +276,66 @@ def create_trigger(self): self.db.add_cfg_para_trigger(self.process_id,"lock_file", self.lock_file) self.db.add_cfg_para_trigger(self.process_id,"initok_file", self.initok_file) self.db.add_cfg_para_trigger(self.process_id,"initfail_file", self.initfail_file) - - self.db.add_cfg_para_trigger(self.process_id,"output_mode", self.output_mode) - self.db.add_cfg_para_trigger(self.process_id,"output_stream", self.output_stream) - self.db.add_cfg_para_trigger(self.process_id,"total_daq_time", repr(self.total_daq_time)) + self.db.add_cfg_para_trigger(self.process_id,"output_mode", self.output_mode) + if self.output_mode == "STREAM": + self.db.add_cfg_para_trigger(self.process_id,"output_stream", self.output_stream) + elif self.output_mode == "FILE": + 
self.db.add_cfg_para_trigger(self.process_id,"data_dir", self.data_dir) + self.db.add_cfg_para_trigger(self.process_id,"data_file", self.data_file) + self.db.add_cfg_para_trigger(self.process_id,"file_max_duration", self.file_max_duration) + self.db.add_cfg_para_trigger(self.process_id,"file_max_size", self.file_max_size) + self.db.add_cfg_para_trigger(self.process_id,"file_max_events", self.file_max_events) - self.db.add_cfg_para_trigger(self.process_id,"trigger_addr", self.trigger_addr) - self.db.add_cfg_para_trigger(self.process_id,"trigger_port", repr(self.trigger_port)) + self.db.add_cfg_para_trigger(self.process_id,"total_daq_time", repr(self.total_daq_time)) - self.db.add_cfg_para_trigger(self.process_id,"trigger_mask", "%#02x"%self.trigger_mask) - self.db.add_cfg_para_trigger(self.process_id,"busy_mask", "%#02x"%self.busy_mask) + self.db.add_cfg_para_trigger(self.process_id,"trigger_addr", self.trigger_addr) + self.db.add_cfg_para_trigger(self.process_id,"trigger_port", repr(self.trigger_port)) - self.db.add_cfg_para_trigger(self.process_id,"trig0_scale_global", repr(self.trig0_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig0_scale_autopass", repr(self.trig0_scale_autopass)) + self.db.add_cfg_para_trigger(self.process_id,"trigger_mask", "%#02x"%self.trigger_mask) + self.db.add_cfg_para_trigger(self.process_id,"busy_mask", "%#02x"%self.busy_mask) - self.db.add_cfg_para_trigger(self.process_id,"trig1_scale_global", repr(self.trig1_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig1_scale_autopass", repr(self.trig1_scale_autopass)) + self.db.add_cfg_para_trigger(self.process_id,"trigger0_delay", "%#02x"%self.trigger0_delay) - self.db.add_cfg_para_trigger(self.process_id,"trig2_scale_global", repr(self.trig2_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig2_scale_autopass", repr(self.trig2_scale_autopass)) + self.db.add_cfg_para_trigger(self.process_id,"correlated_trigger_delay", 
"%#04x"%self.correlated_trigger_delay) + if (self.trigger_mask & 0x01): + #self.db.add_cfg_para_trigger(self.process_id,"trig0_scale_global", repr(self.trig0_scale_global)) + self.db.add_cfg_para_trigger(self.process_id,"trig0_scale_autopass", repr(self.trig0_scale_autopass)) - self.db.add_cfg_para_trigger(self.process_id,"trig3_scale_global", repr(self.trig3_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig3_scale_autopass", repr(self.trig3_scale_autopass)) + if (self.trigger_mask & 0x02): + self.db.add_cfg_para_trigger(self.process_id,"trig1_scale_global", repr(self.trig1_scale_global)) + self.db.add_cfg_para_trigger(self.process_id,"trig1_scale_autopass", repr(self.trig1_scale_autopass)) - self.db.add_cfg_para_trigger(self.process_id,"trig4_scale_global", repr(self.trig4_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig4_scale_autopass", repr(self.trig4_scale_autopass)) + if (self.trigger_mask & 0x04): + self.db.add_cfg_para_trigger(self.process_id,"trig2_scale_global", repr(self.trig2_scale_global)) + self.db.add_cfg_para_trigger(self.process_id,"trig2_scale_autopass", repr(self.trig2_scale_autopass)) - self.db.add_cfg_para_trigger(self.process_id,"trig5_scale_global", repr(self.trig5_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig5_scale_autopass", repr(self.trig5_scale_autopass)) + if (self.trigger_mask & 0x08): + self.db.add_cfg_para_trigger(self.process_id,"trig3_scale_global", repr(self.trig3_scale_global)) + self.db.add_cfg_para_trigger(self.process_id,"trig3_scale_autopass", repr(self.trig3_scale_autopass)) - self.db.add_cfg_para_trigger(self.process_id,"trig6_scale_global", repr(self.trig6_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig6_scale_autopass", repr(self.trig6_scale_autopass)) + if (self.trigger_mask & 0x10): + self.db.add_cfg_para_trigger(self.process_id,"trig4_scale_global", repr(self.trig4_scale_global)) + 
self.db.add_cfg_para_trigger(self.process_id,"trig4_scale_autopass", repr(self.trig4_scale_autopass)) - self.db.add_cfg_para_trigger(self.process_id,"trig7_scale_global", repr(self.trig7_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig7_scale_autopass", repr(self.trig7_scale_autopass)) + if (self.trigger_mask & 0x20): + self.db.add_cfg_para_trigger(self.process_id,"trig5_scale_global", repr(self.trig5_scale_global)) + self.db.add_cfg_para_trigger(self.process_id,"trig5_scale_autopass", repr(self.trig5_scale_autopass)) - self.db.add_cfg_para_trigger(self.process_id,"correlated_trigger_delay","%#04x"%self.correlated_trigger_delay) + if (self.trigger_mask & 0x40): + self.db.add_cfg_para_trigger(self.process_id,"trig6_scale_global", repr(self.trig6_scale_global)) + self.db.add_cfg_para_trigger(self.process_id,"trig6_scale_autopass", repr(self.trig6_scale_autopass)) - self.db.add_cfg_para_trigger(self.process_id,"timepix_shutter_delay", "%#02x"%self.timepix_shutter_delay) - self.db.add_cfg_para_trigger(self.process_id,"timepix_shutter_width", "%#02x"%self.timepix_shutter_width) + if (self.trigger_mask & 0x80): + self.db.add_cfg_para_trigger(self.process_id,"trig7_scale_global", repr(self.trig7_scale_global)) + self.db.add_cfg_para_trigger(self.process_id,"trig7_scale_autopass", repr(self.trig7_scale_autopass)) + + self.db.add_cfg_para_trigger(self.process_id,"timepix_shutter_delay", "%#02x"%self.timepix_shutter_delay) + self.db.add_cfg_para_trigger(self.process_id,"timepix_shutter_width", "%#02x"%self.timepix_shutter_width) + + self.db.add_cfg_para_trigger(self.process_id,"daq_loop_delay", repr(self.daq_loop_delay)) - self.db.add_cfg_para_trigger(self.process_id,"daq_loop_delay", repr(self.daq_loop_delay)) - self.db.add_cfg_para_trigger(self.process_id,"debug_scale", repr(self.debug_scale)) + self.db.add_cfg_para_trigger(self.process_id,"debug_scale", repr(self.debug_scale)) return "ok" @@ -305,7 +347,7 @@ def start_trig(self): if self.node_id != 0: 
command = "ssh -i %s %s '( %s )'"%(self.ssh_id_file,self.node_ip,command) - print "- Start Trigger process" + print "- Starting Trigger process" print command print " Log written to %s"%self.log_file @@ -320,31 +362,25 @@ def start_trig(self): return 0 # Tag start of process in DB - if self.run_number: - self.db.set_process_time_start(self.process_id) + if self.run_number: self.db.set_process_time_create(self.process_id) # Return process id return self.process.pid def stop_trig(self): - # Tag stop process in DB - if self.run_number: - self.db.set_process_time_stop(self.process_id) - # Wait up to 5 seconds for DAQ to stop of its own (on quit file or on time elapsed) for i in range(10): - if self.process.poll() != None: - # Process exited: clean up defunct process and close log file self.process.wait() self.log_handle.close() - return 1 - + if self.run_number: self.db.set_process_time_end(self.process_id) + return True time.sleep(0.5) - # Process did not stop: try sending and interrupt + # Process did not stop on its own: try sending and interrupt + print "Trigger::stop_trig - WARNING: Trigger process did not stop on its own. Sending interrupt" if self.node_id == 0: # If process is on local host, just send a kill signal command = "kill %d"%self.process.pid @@ -352,25 +388,26 @@ def stop_trig(self): # If it is on a remote host, use ssh to send kill command. 
# PID on remote host is recovered from the lock file command = "ssh -i %s %s '( kill `cat %s` )'"%(self.ssh_id_file,self.node_ip,self.lock_file) - print command - os.system(command) + print command + os.system(command) # Wait up to 5 seconds for DAQ to stop on interrupt for i in range(10): - if self.process.poll() != None: - # Process exited: clean up defunct process and close log file self.process.wait() self.log_handle.close() - return 1 - + if self.run_number: self.db.set_process_time_end(self.process_id) + return True time.sleep(0.5) # Process did not stop smoothly: terminate it + print "Trigger::stop_trig - WARNING: Trigger process did not stop on interrupt. Terminating it" self.process.terminate() time.sleep(1) if self.process.poll() != None: self.process.wait() self.log_handle.close() - return 0 + + if self.run_number: self.db.set_process_time_end(self.process_id) + return False From 90709c234b6a21f49cc8a0702d47cf578abbbc18 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Wed, 27 Nov 2019 09:30:26 +0100 Subject: [PATCH 24/64] PadmeDAQ: updated code for new DB schema --- PadmeDAQ/src/DAQ.c | 219 +++++++++-------------- PadmeDAQ/src/FAKE.c | 6 +- PadmeDAQ/src/PadmeDAQ.c | 376 +++++++++++++++------------------------- PadmeDAQ/src/ZSUP.c | 182 +++++++++---------- 4 files changed, 318 insertions(+), 465 deletions(-) diff --git a/PadmeDAQ/src/DAQ.c b/PadmeDAQ/src/DAQ.c index e924316e..a0d274f3 100644 --- a/PadmeDAQ/src/DAQ.c +++ b/PadmeDAQ/src/DAQ.c @@ -31,83 +31,6 @@ int Handle; // Handle for V1742 digitizer extern int InBurst; extern int BreakSignal; -//int InBurst; // =0 DAQ not running, >0 DAQ running - -//int BreakSignal = 0; // If >0 an interrupt was received and DAQ should exit - -// Function to handle interrupts -//void termination_handler (int signum) -//{ -// if (InBurst > 0) { -// -// // If DAQ is running, don't stop abruptly -// // Just tell the main to exit when DAQ allows it -// printf ("\n### Interrupt Received while in burst mode - Signal: 
%d\n", signum); -// printf ("--- Waiting for DAQ to terminate gracefully... \n"); -// if (signum!=0) { -// BreakSignal = signum; -// } else { -// BreakSignal = -1; -// } -// // Ignore additional interrupts -// signal (SIGINT, SIG_IGN); -// signal (SIGHUP, SIG_IGN); -// signal (SIGTERM, SIG_IGN); -// signal (SIGFPE, SIG_DFL); -// -// } else { -// -// printf ("\n### Interrupt Received in idle mode - Signal: %d\n", signum); -// printf ("--- Terminating DAQ program... \n"); -// signal (SIGINT, SIG_DFL); // Reset interrupt handler -// signal (SIGHUP, SIG_DFL); -// signal (SIGTERM, SIG_DFL); -// signal (SIGFPE, SIG_DFL); -// DAQ_close (); -// //remove_lock(); // will leave dangling lock file -// printf("= Hope you enjoyed the ride. Bye!\n"); -// exit(0); -// -// } -//} - -// Set interrupt handling functions -//void set_signal_handlers() -//{ -// printf ("Trap signals:"); -// signal (SIGINT, termination_handler); -// printf (" SIGINT %d", SIGINT); -// signal (SIGHUP, termination_handler); -// printf (" SIGHUP %d", SIGHUP); -// signal (SIGTERM, termination_handler); -// printf (" SIGTERM %d", SIGTERM); -// //signal (SIGUSR2, termination_handler); // do we need this? -// //printf (" %d", SIGUSR2); -// signal (SIGFPE, termination_handler); -// printf (" SIGFPE %d", SIGFPE); -// printf ("\n"); -//} - -//// Return file name given the file open time. 
Return 0 if OK, <>0 error -//int generate_filename(char* name, const time_t time) { -// struct tm* t = localtime(&time); -// sprintf(name,"%s_%.4d_%.2d_%.2d_%.2d_%.2d_%.2d", -// Config->data_file, -// 1900+t->tm_year, 1+t->tm_mon, t->tm_mday, -// t->tm_hour, t->tm_min, t->tm_sec); -// return 0; -//} - -//// Write time (in secs) to a string with standard formatting -//char* format_time(const time_t time) { -// static char tform[20]; -// struct tm* t = localtime(&time); -// sprintf(tform,"%.4d/%.2d/%.2d %.2d:%.2d:%.2d", -// 1900+t->tm_year, 1+t->tm_mon, t->tm_mday, -// t->tm_hour, t->tm_min, t->tm_sec); -// return tform; -//} - // Get LinkNum (link address of port on A3818 boards) int get_LinkNum() { @@ -180,11 +103,12 @@ int DAQ_connect () // Get board serial number and save it to DB Config->board_sn = boardInfo.SerialNumber; - if ( Config->run_number ) { - char outstr[2048]; - sprintf(outstr,"%d",Config->board_sn); - db_add_cfg_para(Config->process_id,"board_sn",outstr); - } + //if ( Config->run_number ) { + // char outstr[2048]; + // sprintf(outstr,"%d",Config->board_sn); + // db_add_cfg_para(Config->process_id,"board_sn",outstr); + //} + printf("DBINFO - add_proc_cfg_para %d %s %d\n",Config->process_id,"board_sn",Config->board_sn); return 0; @@ -784,10 +708,12 @@ int DAQ_readdata () // DAQ is now ready to start. 
Create InitOK file and set status to INITIALIZED if ( create_initok_file() ) return 1; - if (Config->run_number) { - printf("- Setting process status to INITIALIZED (%d) in DB\n",DB_STATUS_INITIALIZED); - db_process_set_status(Config->process_id,DB_STATUS_INITIALIZED); - } + //if (Config->run_number) { + // printf("- Setting process status to INITIALIZED (%d) in DB\n",DB_STATUS_INITIALIZED); + // db_process_set_status(Config->process_id,DB_STATUS_INITIALIZED); + //} + printf("- Setting process status to INITIALIZED (%d)\n",DB_STATUS_INITIALIZED); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_INITIALIZED); if (Config->startdaq_mode == 0) { @@ -826,12 +752,15 @@ int DAQ_readdata () time(&t_daqstart); printf("%s - Acquisition started\n",format_time(t_daqstart)); - if ( Config->run_number ) { - // Tell DB that the process has started - printf("- Setting process status to RUNNING (%d) in DB\n",DB_STATUS_RUNNING); - db_process_set_status(Config->process_id,DB_STATUS_RUNNING); - if ( db_process_open(Config->process_id,t_daqstart) != DB_OK ) return 2; - } + //if ( Config->run_number ) { + // // Tell DB that the process has started + // printf("- Setting process status to RUNNING (%d) in DB\n",DB_STATUS_RUNNING); + // db_process_set_status(Config->process_id,DB_STATUS_RUNNING); + // if ( db_process_open(Config->process_id,t_daqstart) != DB_OK ) return 2; + //} + printf("- Setting process status to RUNNING (%d)\n",DB_STATUS_RUNNING); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_RUNNING); + printf("DBINFO - process_set_time_start %d %s\n",Config->process_id,format_time(t_daqstart)); // Zero counters totalReadSize = 0; @@ -849,16 +778,16 @@ int DAQ_readdata () generate_filename(tmpName,t_daqstart); fileName[fileIndex] = (char*)malloc(strlen(tmpName)+1); strcpy(fileName[fileIndex],tmpName); - if ( Config->run_number ) { - rc = db_file_check(fileName[fileIndex]); - if ( rc < 0 ) { - printf("ERROR: DB check for file %s 
returned an error\n",fileName[fileIndex]); - return 2; - } else if ( rc == 1 ) { - printf("ERROR: file %s already exists in the DB\n",fileName[fileIndex]); - return 2; - } - } + //if ( Config->run_number ) { + // rc = db_file_check(fileName[fileIndex]); + // if ( rc < 0 ) { + // printf("ERROR: DB check for file %s returned an error\n",fileName[fileIndex]); + // return 2; + // } else if ( rc == 1 ) { + // printf("ERROR: file %s already exists in the DB\n",fileName[fileIndex]); + // return 2; + // } + //} pathName[fileIndex] = (char*)malloc(strlen(Config->data_dir)+strlen(fileName[fileIndex])+1); strcpy(pathName[fileIndex],Config->data_dir); strcat(pathName[fileIndex],fileName[fileIndex]); @@ -906,8 +835,12 @@ int DAQ_readdata () fileEvents[fileIndex] = 0; // Register file in the DB - if ( Config->run_number && strcmp(Config->output_mode,"FILE")==0 ) { - if ( db_file_open(fileName[fileIndex],PEVT_CURRENT_VERSION,fileTOpen[fileIndex],Config->process_id,fileIndex) != DB_OK ) return 2; + //if ( Config->run_number && strcmp(Config->output_mode,"FILE")==0 ) { + // if ( db_file_open(fileName[fileIndex],PEVT_CURRENT_VERSION,fileTOpen[fileIndex],Config->process_id,fileIndex) != DB_OK ) return 2; + //} + if ( strcmp(Config->output_mode,"FILE")==0 ) { + printf("DBINFO - file_create %s %s %d %d %d\n",fileName[fileIndex],"DAQDATA",PEVT_CURRENT_VERSION,Config->process_id,fileIndex); + printf("DBINFO - file_set_time_open %s %s\n",fileName[fileIndex],format_time(fileTOpen[fileIndex])); } // Write header to file @@ -1143,7 +1076,8 @@ int DAQ_readdata () fTailSize,writeSize); return 2; // As this is an error while writing data to output file, no point in sending file tail } - fileSize[fileIndex] += fTailSize; + fileSize[fileIndex] += writeSize; + totalWriteSize += writeSize; // Close old output file and show some info about counters if (close(fileHandle) == -1) { @@ -1156,9 +1090,12 @@ int DAQ_readdata () fileEvents[fileIndex],fileSize[fileIndex]); // Close file in DB - if ( 
Config->run_number ) { - if ( db_file_close(fileName[fileIndex],fileTClose[fileIndex],fileSize[fileIndex],fileEvents[fileIndex]) != DB_OK ) return 2; - } + //if ( Config->run_number ) { + // if ( db_file_close(fileName[fileIndex],fileTClose[fileIndex],fileSize[fileIndex],fileEvents[fileIndex]) != DB_OK ) return 2; + //} + printf("DBINFO - file_set_time_close %s %s\n",fileName[fileIndex],format_time(fileTClose[fileIndex])); + printf("DBINFO - file_set_size %s %lu\n",fileName[fileIndex],fileSize[fileIndex]); + printf("DBINFO - file_set_n_events %s %u\n",fileName[fileIndex],fileEvents[fileIndex]); // Update file counter fileIndex++; @@ -1169,16 +1106,16 @@ int DAQ_readdata () generate_filename(tmpName,t_now); fileName[fileIndex] = (char*)malloc(strlen(tmpName)+1); strcpy(fileName[fileIndex],tmpName); - if ( Config->run_number ) { - rc = db_file_check(fileName[fileIndex]); - if ( rc < 0 ) { - printf("ERROR: DB check for file %s returned an error\n",fileName[fileIndex]); - return 2; - } else if ( rc == 1 ) { - printf("ERROR: file %s already exists in the DB\n",fileName[fileIndex]); - return 2; - } - } + //if ( Config->run_number ) { + // rc = db_file_check(fileName[fileIndex]); + // if ( rc < 0 ) { + // printf("ERROR: DB check for file %s returned an error\n",fileName[fileIndex]); + // return 2; + // } else if ( rc == 1 ) { + // printf("ERROR: file %s already exists in the DB\n",fileName[fileIndex]); + // return 2; + // } + //} pathName[fileIndex] = (char*)malloc(strlen(Config->data_dir)+strlen(fileName[fileIndex])+1); strcpy(pathName[fileIndex],Config->data_dir); strcat(pathName[fileIndex],fileName[fileIndex]); @@ -1193,9 +1130,11 @@ int DAQ_readdata () fileEvents[fileIndex] = 0; // Register file in the DB - if ( Config->run_number ) { - if ( db_file_open(fileName[fileIndex],PEVT_CURRENT_VERSION,fileTOpen[fileIndex],Config->process_id,fileIndex) != DB_OK ) return 2; - } + //if ( Config->run_number ) { + // if ( 
db_file_open(fileName[fileIndex],PEVT_CURRENT_VERSION,fileTOpen[fileIndex],Config->process_id,fileIndex) != DB_OK ) return 2; + //} + printf("DBINFO - file_create %s %s %d %d %d\n",fileName[fileIndex],"DAQDATA",PEVT_CURRENT_VERSION,Config->process_id,fileIndex); + printf("DBINFO - file_set_time_open %s %s\n",fileName[fileIndex],format_time(fileTOpen[fileIndex])); // Write header to file fHeadSize = create_file_head(fileIndex,Config->run_number,Config->board_id,Config->board_sn,fileTOpen[fileIndex],(void *)outEvtBuffer); @@ -1205,7 +1144,8 @@ int DAQ_readdata () fHeadSize,writeSize); return 2; // As this is an error while writing data to output file, no point in sending file tail } - fileSize[fileIndex] += fHeadSize; + fileSize[fileIndex] += writeSize; + totalWriteSize += writeSize; } else { @@ -1230,6 +1170,9 @@ int DAQ_readdata () } + // Get time when processing ends + time(&t_now); + // Tell user what stopped DAQ if ( adcError ) printf("=== Stopping DAQ on ADC access or data handling ERROR ===\n"); if ( BreakSignal ) printf("=== Stopping DAQ on interrupt %d ===\n",BreakSignal); @@ -1253,7 +1196,8 @@ int DAQ_readdata () fTailSize,writeSize); return 2; } - fileSize[fileIndex] += fTailSize; + fileSize[fileIndex] += writeSize; + totalWriteSize += writeSize; // Close output file and show some info about counters if (close(fileHandle) == -1) { @@ -1262,14 +1206,17 @@ int DAQ_readdata () }; if ( strcmp(Config->output_mode,"FILE")==0 ) { printf("%s - Closed output file '%s' after %d secs with %u events and size %lu bytes\n", - format_time(t_now),pathName[fileIndex],(int)(fileTClose[fileIndex]-fileTOpen[fileIndex]), + format_time(fileTClose[fileIndex]),pathName[fileIndex],(int)(fileTClose[fileIndex]-fileTOpen[fileIndex]), fileEvents[fileIndex],fileSize[fileIndex]); - if ( Config->run_number ) { - if ( db_file_close(fileName[fileIndex],fileTClose[fileIndex],fileSize[fileIndex],fileEvents[fileIndex]) != DB_OK ) return 2; - } + //if ( Config->run_number ) { + // if ( 
db_file_close(fileName[fileIndex],fileTClose[fileIndex],fileSize[fileIndex],fileEvents[fileIndex]) != DB_OK ) return 2; + //} + printf("DBINFO - file_set_time_close %s %s\n",fileName[fileIndex],format_time(fileTClose[fileIndex])); + printf("DBINFO - file_set_size %s %lu\n",fileName[fileIndex],fileSize[fileIndex]); + printf("DBINFO - file_set_n_events %s %u\n",fileName[fileIndex],fileEvents[fileIndex]); } else { printf("%s - Closed output stream '%s' after %d secs with %u events and size %lu bytes\n", - format_time(t_now),pathName[fileIndex],(int)(fileTClose[fileIndex]-fileTOpen[fileIndex]), + format_time(fileTClose[fileIndex]),pathName[fileIndex],(int)(fileTClose[fileIndex]-fileTOpen[fileIndex]), fileEvents[fileIndex],fileSize[fileIndex]); } @@ -1313,10 +1260,16 @@ int DAQ_readdata () free(outEvtBuffer); // Tell DB that the process has ended - if ( Config->run_number ) { - //if ( db_process_close(Config->process_id,t_daqstop) != DB_OK ) return 2; - if ( db_process_close(Config->process_id,t_daqstop,totalWriteSize,totalWriteEvents) != DB_OK ) return 2; - } + //if ( Config->run_number ) { + // //if ( db_process_close(Config->process_id,t_daqstop) != DB_OK ) return 2; + // if ( db_process_close(Config->process_id,t_daqstop,totalWriteSize,totalWriteEvents) != DB_OK ) return 2; + //} + printf("- Setting process status to FINISHED (%d)\n",DB_STATUS_FINISHED); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); + printf("DBINFO - process_set_time_stop %d %s\n",Config->process_id,format_time(t_daqstop)); + printf("DBINFO - process_set_n_files %d %d\n",Config->process_id,fileIndex); + printf("DBINFO - process_set_total_events %d %d\n",Config->process_id,totalWriteEvents); + printf("DBINFO - process_set_total_size %d %ld\n",Config->process_id,totalWriteSize); // Give some final report evtReadPerSec = 0.; @@ -1337,7 +1290,7 @@ int DAQ_readdata () printf("Total number of events written: %u - %6.2f events/s\n",totalWriteEvents,evtWritePerSec); 
printf("Total size of data written: %lu B - %6.2f KB/s\n",totalWriteSize,sizeWritePerSec); if ( strcmp(Config->output_mode,"FILE")==0 ) { - printf("=== Files created =======================================\n"); + printf("=== %2d Files created =======================================\n",fileIndex); for(i=0;ilock_file,F_OK) != -1 ) { - lock_handle = fopen(Config->lock_file,"r"); - if ( fscanf(lock_handle,"%d",&pid) == 0 ) { - printf("PadmeDAQ::create_lock - ERROR - Lock file '%s' found but could not read the PID\n",Config->lock_file); - pid = -1; - } - fclose(lock_handle); - return pid; - } - - // Check if directory for lock file exists - char* path = strdup(Config->lock_file); - char* lock_dir = dirname(path); // N.B. dirname modifies its argument! - if ( stat(lock_dir,&sb) == 0 && S_ISDIR(sb.st_mode) ) { - - // Create lock file and write own pid in it - lock_handle = fopen(Config->lock_file,"w"); - fprintf(lock_handle,"%d\n",getpid()); - fclose(lock_handle); - printf("PadmeDAQ::create_lock - Lock file '%s' created for PID %d\n",Config->lock_file,getpid()); - return 0; - - } - - printf("PadmeDAQ::create_lock - ERROR - Directory '%s' does not exist: cannot create lock file '%s'\n",lock_dir,Config->lock_file); - return -1; - -} - -// Remove lock file (called just before exiting) -void remove_lock() -{ - struct stat sb; - - // Check if lock file directory exists - char* path = strdup(Config->lock_file); - char* lock_dir = dirname(path); // N.B. dirname modifies its argument! 
- if ( stat(lock_dir,&sb) == 0 && S_ISDIR(sb.st_mode) ) { - if ( unlink(Config->lock_file) ) { - printf("PadmeDAQ::remove_lock - ERROR - Problem while removing lock file '%s'\n",Config->lock_file); - } else { - printf("PadmeDAQ::remove_lock - Lock file '%s' removed\n",Config->lock_file); - } - } else { - printf("PadmeDAQ::remove_lock - ERROR - Directory '%s' does not exist: cannot remove lock file '%s'.\n",lock_dir,Config->lock_file); - } - - return; - -} - -int create_initok_file() -{ - - FILE* iff; - struct stat sb; - - // Check if directory for initok file exists - char* path = strdup(Config->initok_file); - char* iff_dir = dirname(path); // N.B. dirname modifies its argument! - if ( stat(iff_dir,&sb) != 0 || ! S_ISDIR(sb.st_mode) ) { - printf("PadmeDAQ::create_initok_file - Directory '%s' does not exist: cannot create InitOK file '%s'.\n",iff_dir,Config->initok_file); - return 1; - } - - // Check if file is already there (not cleaned in previous run?) - if ( access(Config->initok_file,F_OK) != -1 ) { - printf("PadmeDAQ::create_initok_file - InitOK file '%s' already exists.\n",Config->initok_file); - return 1; - } - - // Create InitOK file - iff = fopen(Config->initok_file,"w"); - fclose(iff); - printf("- InitOK file '%s' created\n",Config->initok_file); - return 0; - -} - -int create_initfail_file() -{ - - FILE* iff; - struct stat sb; - - // Check if directory for initfail file exists - char* path = strdup(Config->initfail_file); - char* iff_dir = dirname(path); // N.B. dirname modifies its argument! - if ( stat(iff_dir,&sb) != 0 || ! S_ISDIR(sb.st_mode) ) { - printf("PadmeDAQ::create_initfail_file - Directory '%s' does not exist: cannot create InitFail file '%s'.\n",iff_dir,Config->initfail_file); - return 1; - } - - // Check if file is already there (not cleaned in previous run?) 
- if ( access(Config->initfail_file,F_OK) == -1 ) { - printf("PadmeDAQ::create_initfail_file - InitFail file '%s' already exists.\n",Config->initfail_file); - return 1; - } - - // Create InitFail file - iff = fopen(Config->initfail_file,"w"); - fclose(iff); - printf("- InitFail file '%s' created\n",Config->initfail_file); - return 0; - -} -*/ - // Start of main program int main(int argc, char*argv[]) { @@ -158,11 +41,6 @@ int main(int argc, char*argv[]) // Use line buffering for stdout setlinebuf(stdout); - // Show welcome message - printf("=======================================\n"); - printf("=== Welcome to the PADME DAQ system ===\n"); - printf("=======================================\n"); - // Initialize run configuration if ( init_config() ) { printf("*** ERROR *** Problem initializing run configuration.\n"); @@ -203,6 +81,11 @@ int main(int argc, char*argv[]) abort (); } + // Show welcome message + printf("=======================================\n"); + printf("=== Welcome to the PADME DAQ system ===\n"); + printf("=======================================\n"); + // Show configuration print_config(); @@ -214,64 +97,65 @@ int main(int argc, char*argv[]) } else { printf("*** ERROR *** Problems while creating lock file '%s'. Exiting.\n",Config->lock_file); } - //init_fail(0); create_initfail_file(); exit(1); } - if ( Config->run_number ) { - - // Connect to DB - if ( db_init() != DB_OK ) { - printf("*** ERROR *** Unable to initialize DB connection. 
Exiting.\n"); - create_initfail_file(); - remove_lock(); - exit(1); - } - - // Verify if run number is valid - rc = db_run_check(Config->run_number); - if ( rc != 1 ) { - if ( rc < 0 ) { - printf("ERROR: DB check for run number %d returned an error\n",Config->run_number); - } else if ( rc == 0 ) { - printf("ERROR: run number %d does not exist in the DB\n",Config->run_number); - } - create_initfail_file(); - remove_lock(); - exit(1); - } - - // Verify if process id is valid - rc = db_process_check(Config->process_id); - if ( rc != 1 ) { - if ( rc < 0 ) { - printf("ERROR: DB check for process id %d returned an error\n",Config->process_id); - } else if ( rc == 0 ) { - printf("ERROR: process id %d does not exist in the DB\n",Config->process_id); - } - create_initfail_file(); - remove_lock(); - exit(1); - } - int status = db_process_get_status(Config->process_id); - if (status!=DB_STATUS_IDLE) { - printf("ERROR: process id %d is not in IDLE (%d) status (status=%d)\n",Config->process_id,DB_STATUS_IDLE,status); - create_initfail_file(); - remove_lock(); - exit(1); - } - - // Save the process configuration to DB -> Now done by RunControl - //save_config(); - - } + //if ( Config->run_number ) { + // + // // Connect to DB + // if ( db_init() != DB_OK ) { + // printf("*** ERROR *** Unable to initialize DB connection. 
Exiting.\n"); + // create_initfail_file(); + // remove_lock(); + // exit(1); + // } + // + // // Verify if run number is valid + // rc = db_run_check(Config->run_number); + // if ( rc != 1 ) { + // if ( rc < 0 ) { + // printf("ERROR: DB check for run number %d returned an error\n",Config->run_number); + // } else if ( rc == 0 ) { + // printf("ERROR: run number %d does not exist in the DB\n",Config->run_number); + // } + // create_initfail_file(); + // remove_lock(); + // exit(1); + // } + // + // // Verify if process id is valid + // rc = db_process_check(Config->process_id); + // if ( rc != 1 ) { + // if ( rc < 0 ) { + // printf("ERROR: DB check for process id %d returned an error\n",Config->process_id); + // } else if ( rc == 0 ) { + // printf("ERROR: process id %d does not exist in the DB\n",Config->process_id); + // } + // create_initfail_file(); + // remove_lock(); + // exit(1); + // } + // int status = db_process_get_status(Config->process_id); + // if (status!=DB_STATUS_IDLE) { + // printf("ERROR: process id %d is not in IDLE (%d) status (status=%d)\n",Config->process_id,DB_STATUS_IDLE,status); + // create_initfail_file(); + // remove_lock(); + // exit(1); + // } + // + // // Save the process configuration to DB -> Now done by RunControl + // //save_config(); + // + //} // Update process status - if (Config->run_number) { - printf("- Setting process status to INITIALIZING (%d) in DB\n",DB_STATUS_INITIALIZING); - db_process_set_status(Config->process_id,DB_STATUS_INITIALIZING); - } + //if (Config->run_number) { + // printf("- Setting process status to INITIALIZING (%d) in DB\n",DB_STATUS_INITIALIZING); + // db_process_set_status(Config->process_id,DB_STATUS_INITIALIZING); + //} + printf("- Setting process status to INITIALIZING (%d)\n",DB_STATUS_INITIALIZING); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_INITIALIZING); // Check current running mode (DAQ, ZSUP, FAKE) @@ -291,10 +175,12 @@ int main(int argc, char*argv[]) if ( 
DAQ_connect() ) { printf("*** ERROR *** Problem while connecting to V1742 digitizer. Exiting.\n"); create_initfail_file(); - if (Config->run_number) { - printf("- Setting process status to INIT_FAIL (%d) in DB\n",DB_STATUS_INIT_FAIL); - db_process_set_status(Config->process_id,DB_STATUS_INIT_FAIL); - } + //if (Config->run_number) { + // printf("- Setting process status to INIT_FAIL (%d) in DB\n",DB_STATUS_INIT_FAIL); + // db_process_set_status(Config->process_id,DB_STATUS_INIT_FAIL); + //} + printf("- Setting process status to INIT_FAIL (%d)\n",DB_STATUS_INIT_FAIL); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_INIT_FAIL); remove_lock(); exit(1); } @@ -304,10 +190,12 @@ int main(int argc, char*argv[]) if ( DAQ_init() ) { printf("*** ERROR *** Problem while initializing V1742 digitizer. Exiting.\n"); create_initfail_file(); - if (Config->run_number) { - printf("- Setting process status to INIT_FAIL (%d) in DB\n",DB_STATUS_INIT_FAIL); - db_process_set_status(Config->process_id,DB_STATUS_INIT_FAIL); - } + //if (Config->run_number) { + // printf("- Setting process status to INIT_FAIL (%d) in DB\n",DB_STATUS_INIT_FAIL); + // db_process_set_status(Config->process_id,DB_STATUS_INIT_FAIL); + //} + printf("- Setting process status to INIT_FAIL (%d)\n",DB_STATUS_INIT_FAIL); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_INIT_FAIL); remove_lock(); exit(1); } @@ -317,49 +205,61 @@ int main(int argc, char*argv[]) rc = DAQ_readdata(); if ( rc == 0 ) { printf("=== Run finished ===\n"); - if (Config->run_number) { - printf("- Setting process status to FINISHED (%d) in DB\n",DB_STATUS_FINISHED); - db_process_set_status(Config->process_id,DB_STATUS_FINISHED); - } + //if (Config->run_number) { + // printf("- Setting process status to FINISHED (%d) in DB\n",DB_STATUS_FINISHED); + // db_process_set_status(Config->process_id,DB_STATUS_FINISHED); + //} + printf("- Setting process status to FINISHED (%d) in 
DB\n",DB_STATUS_FINISHED); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); } else if ( rc == 1 ) { printf("*** ERROR *** Problem while initializing DAQ process. Exiting.\n"); create_initfail_file(); - if (Config->run_number) { - printf("- Setting process status to INIT_FAIL (%d) in DB\n",DB_STATUS_INIT_FAIL); - db_process_set_status(Config->process_id,DB_STATUS_INIT_FAIL); - } + //if (Config->run_number) { + // printf("- Setting process status to INIT_FAIL (%d) in DB\n",DB_STATUS_INIT_FAIL); + // db_process_set_status(Config->process_id,DB_STATUS_INIT_FAIL); + //} + printf("- Setting process status to INIT_FAIL (%d)\n",DB_STATUS_INIT_FAIL); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_INIT_FAIL); remove_lock(); exit(1); } else if ( rc == 2 ) { printf("*** ERROR *** Data acquistion ended with an error. Please check log file for details. Exiting.\n"); - if (Config->run_number) { - printf("- Setting process status to RUN_FAIL (%d) in DB\n",DB_STATUS_RUN_FAIL); - db_process_set_status(Config->process_id,DB_STATUS_RUN_FAIL); - } + //if (Config->run_number) { + // printf("- Setting process status to RUN_FAIL (%d) in DB\n",DB_STATUS_RUN_FAIL); + // db_process_set_status(Config->process_id,DB_STATUS_RUN_FAIL); + //} + printf("- Setting process status to RUN_FAIL (%d)\n",DB_STATUS_RUN_FAIL); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_RUN_FAIL); remove_lock(); exit(1); } else if ( rc == 3 ) { printf("=== Run aborted before starting DAQ ===\n"); - if (Config->run_number) { - printf("- Setting process status to ABORTED (%d) in DB\n",DB_STATUS_ABORTED); - db_process_set_status(Config->process_id,DB_STATUS_ABORTED); - } + //if (Config->run_number) { + // printf("- Setting process status to ABORTED (%d) in DB\n",DB_STATUS_ABORTED); + // db_process_set_status(Config->process_id,DB_STATUS_ABORTED); + //} + printf("- Setting process status to ABORTED (%d)\n",DB_STATUS_ABORTED); + 
printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_ABORTED); } else { printf("=== DAQ reported unknown return code %d ===\n",rc); - if (Config->run_number) { - printf("- Setting process status to UNKNOWN (%d) in DB\n",DB_STATUS_UNKNOWN); - db_process_set_status(Config->process_id,DB_STATUS_UNKNOWN); - } + //if (Config->run_number) { + // printf("- Setting process status to UNKNOWN (%d) in DB\n",DB_STATUS_UNKNOWN); + // db_process_set_status(Config->process_id,DB_STATUS_UNKNOWN); + //} + printf("- Setting process status to UNKNOWN (%d)\n",DB_STATUS_UNKNOWN); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_UNKNOWN); } // Final reset of the digitizer printf("\n=== Reset digitizer and close connection ===\n"); if ( DAQ_close() ) { printf("*** ERROR *** Final reset of digitizer ended with an error. Exiting.\n"); - if (Config->run_number) { - printf("- Setting process status to CLOSE_FAIL (%d) in DB\n",DB_STATUS_CLOSE_FAIL); - db_process_set_status(Config->process_id,DB_STATUS_CLOSE_FAIL); - } + //if (Config->run_number) { + // printf("- Setting process status to CLOSE_FAIL (%d) in DB\n",DB_STATUS_CLOSE_FAIL); + // db_process_set_status(Config->process_id,DB_STATUS_CLOSE_FAIL); + //} + printf("- Setting process status to CLOSE_FAIL (%d)\n",DB_STATUS_CLOSE_FAIL); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_CLOSE_FAIL); remove_lock(); exit(1); } @@ -407,51 +307,61 @@ int main(int argc, char*argv[]) rc = ZSUP_readdata(); if ( rc == 0 ) { printf("\n=== ZSUP process ended ===\n"); - if (Config->run_number) { - printf("- Setting process status to FINISHED (%d) in DB\n",DB_STATUS_FINISHED); - db_process_set_status(Config->process_id,DB_STATUS_FINISHED); - } + //if (Config->run_number) { + // printf("- Setting process status to FINISHED (%d) in DB\n",DB_STATUS_FINISHED); + // db_process_set_status(Config->process_id,DB_STATUS_FINISHED); + //} + printf("- Setting process status to FINISHED 
(%d)\n",DB_STATUS_FINISHED); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); } else if ( rc == 1 ) { printf("*** ERROR *** Problem while initializing ZSUP process. Exiting.\n"); create_initfail_file(); - if (Config->run_number) { - printf("- Setting process status to INIT_FAIL (%d) in DB\n",DB_STATUS_INIT_FAIL); - db_process_set_status(Config->process_id,DB_STATUS_INIT_FAIL); - } + //if (Config->run_number) { + // printf("- Setting process status to INIT_FAIL (%d) in DB\n",DB_STATUS_INIT_FAIL); + // db_process_set_status(Config->process_id,DB_STATUS_INIT_FAIL); + //} + printf("- Setting process status to INIT_FAIL (%d)\n",DB_STATUS_INIT_FAIL); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_INIT_FAIL); remove_lock(); exit(1); } else if ( rc == 2 ) { printf("*** ERROR *** Zero suppression ended with an error. Please check log file for details. Exiting.\n"); - if (Config->run_number) { - printf("- Setting process status to RUN_FAIL (%d) in DB\n",DB_STATUS_RUN_FAIL); - db_process_set_status(Config->process_id,DB_STATUS_RUN_FAIL); - } + //if (Config->run_number) { + // printf("- Setting process status to RUN_FAIL (%d) in DB\n",DB_STATUS_RUN_FAIL); + // db_process_set_status(Config->process_id,DB_STATUS_RUN_FAIL); + //} + printf("- Setting process status to RUN_FAIL (%d)\n",DB_STATUS_RUN_FAIL); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_RUN_FAIL); remove_lock(); exit(1); } else if ( rc == 3 ) { printf("=== Run aborted before starting ZSUP ===\n"); - if (Config->run_number) { - printf("- Setting process status to ABORTED (%d) in DB\n",DB_STATUS_ABORTED); - db_process_set_status(Config->process_id,DB_STATUS_ABORTED); - } + //if (Config->run_number) { + // printf("- Setting process status to ABORTED (%d) in DB\n",DB_STATUS_ABORTED); + // db_process_set_status(Config->process_id,DB_STATUS_ABORTED); + //} + printf("- Setting process status to ABORTED 
(%d)\n",DB_STATUS_ABORTED); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_ABORTED); } else { printf("=== ZSUP reported unknown return code %d ===\n",rc); - if (Config->run_number) { - printf("- Setting process status to UNKNOWN (%d) in DB\n",DB_STATUS_UNKNOWN); - db_process_set_status(Config->process_id,DB_STATUS_UNKNOWN); - } + //if (Config->run_number) { + // printf("- Setting process status to UNKNOWN (%d) in DB\n",DB_STATUS_UNKNOWN); + // db_process_set_status(Config->process_id,DB_STATUS_UNKNOWN); + //} + printf("- Setting process status to UNKNOWN (%d)\n",DB_STATUS_UNKNOWN); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_UNKNOWN); } } // Close DB connection - if ( Config->run_number ) { - if ( db_end() != DB_OK ) { - printf("*** ERROR *** DB close procedure ended with an error. Please check log file for details. Exiting.\n"); - remove_lock(); - exit(1); - } - } + //if ( Config->run_number ) { + // if ( db_end() != DB_OK ) { + // printf("*** ERROR *** DB close procedure ended with an error. Please check log file for details. 
Exiting.\n"); + // remove_lock(); + // exit(1); + // } + //} // Remove lock file remove_lock(); diff --git a/PadmeDAQ/src/ZSUP.c b/PadmeDAQ/src/ZSUP.c index 3f400b9f..26b72172 100644 --- a/PadmeDAQ/src/ZSUP.c +++ b/PadmeDAQ/src/ZSUP.c @@ -18,10 +18,10 @@ #include "ZSUP.h" -#define TIME_TAG_LEN 20 -#define MAX_FILENAME_LEN MAX_DATA_FILE_LEN+TIME_TAG_LEN +//#define TIME_TAG_LEN 20 +//#define MAX_FILENAME_LEN MAX_DATA_FILE_LEN+TIME_TAG_LEN -#define MAX_N_OUTPUT_FILES 10240 +//#define MAX_N_OUTPUT_FILES 10240 extern int InBurst; extern int BreakSignal; @@ -127,16 +127,16 @@ int ZSUP_readdata () generate_filename(tmpName,t_daqstart); fileName[fileIndex] = (char*)malloc(strlen(tmpName)+1); strcpy(fileName[fileIndex],tmpName); - if ( Config->run_number ) { - rc = db_file_check(fileName[fileIndex]); - if ( rc < 0 ) { - printf("ERROR: DB check for file %s returned an error\n",fileName[fileIndex]); - return 2; - } else if ( rc == 1 ) { - printf("ERROR: file %s already exists in the DB\n",fileName[fileIndex]); - return 2; - } - } + //if ( Config->run_number ) { + // rc = db_file_check(fileName[fileIndex]); + // if ( rc < 0 ) { + // printf("ERROR: DB check for file %s returned an error\n",fileName[fileIndex]); + // return 2; + // } else if ( rc == 1 ) { + // printf("ERROR: file %s already exists in the DB\n",fileName[fileIndex]); + // return 2; + // } + //} pathName[fileIndex] = (char*)malloc(strlen(Config->data_dir)+strlen(fileName[fileIndex])+1); strcpy(pathName[fileIndex],Config->data_dir); strcat(pathName[fileIndex],fileName[fileIndex]); @@ -162,9 +162,6 @@ int ZSUP_readdata () return 2; } - // Create initok file to tell RunControl that we are ready - if ( create_initok_file() ) return 1; - // Open virtual file for input data stream (will wait for DAQ to start before proceeding) printf("- Opening input stream from file '%s'\n",Config->input_stream); inFileHandle = open(Config->input_stream,O_RDONLY, S_IRUSR | S_IWUSR); @@ -173,15 +170,10 @@ int ZSUP_readdata () return 1; } 
- time(&t_daqstart); - printf("%s - Zero suppression started\n",format_time(t_daqstart)); - - if ( Config->run_number ) { - // Tell DB that the process has started - printf("- Setting process status to RUNNING (%d) in DB\n",DB_STATUS_RUNNING); - db_process_set_status(Config->process_id,DB_STATUS_RUNNING); - if ( db_process_open(Config->process_id,t_daqstart) != DB_OK ) return 2; - } + // Create initok file to tell RunControl that we are ready + if ( create_initok_file() ) return 1; + printf("- Setting process status to INITIALIZED (%d)\n",DB_STATUS_INITIALIZED); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_INITIALIZED); // Read file header (4 words) from input stream readSize = read(inFileHandle,inEvtBuffer,16); @@ -228,67 +220,45 @@ int ZSUP_readdata () } // Save board serial number to DB for this process - if ( Config->run_number ) { - char outstr[2048]; - sprintf(outstr,"%u",board_sn); - db_add_cfg_para(Config->process_id,"board_sn",outstr); - } + //if ( Config->run_number ) { + // char outstr[2048]; + // sprintf(outstr,"%u",board_sn); + // db_add_cfg_para(Config->process_id,"board_sn",outstr); + //} + printf("DBINFO - add_proc_cfg_para %d %s %u\n",Config->process_id,"board_sn",board_sn); // Fourth line: start of file time tag unsigned int start_time; memcpy(&start_time,inEvtBuffer+12,4); printf("- Start time %s\n",format_time(start_time)); - // Now that we have a recognized input stream we can register the output file in the DB and send it the header - - /* - // Start counting output files - fileIndex = 0; - tooManyOutputFiles = 0; - - if ( strcmp(Config->output_mode,"FILE")==0 ) { - - // Generate name for initial output file and verify it does not exist - generate_filename(tmpName,t_daqstart); - fileName[fileIndex] = (char*)malloc(strlen(tmpName)+1); - strcpy(fileName[fileIndex],tmpName); - if ( Config->run_number ) { - if ( db_file_check(fileName[fileIndex]) != DB_OK ) return 2; - } - pathName[fileIndex] = 
(char*)malloc(strlen(Config->data_dir)+strlen(fileName[fileIndex])+1); - strcpy(pathName[fileIndex],Config->data_dir); - strcat(pathName[fileIndex],fileName[fileIndex]); - - } else { - - // Use only one virtual file for streaming out all data - pathName[fileIndex] = (char*)malloc(strlen(Config->output_stream)+1); - strcpy(pathName[fileIndex],Config->output_stream); + // Now that we have a recognized input stream we can start the zero suppression process - } + time(&t_daqstart); + printf("%s - Zero suppression started\n",format_time(t_daqstart)); - // Open output file - if ( strcmp(Config->output_mode,"FILE")==0 ) { - printf("- Opening output file %d with path '%s'\n",fileIndex,pathName[fileIndex]); - outFileHandle = open(pathName[fileIndex],O_WRONLY | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR); - } else { - printf("- Opening output stream '%s'\n",pathName[fileIndex]); - outFileHandle = open(pathName[fileIndex],O_WRONLY); - } - if (outFileHandle == -1) { - printf("ERROR - Unable to open file '%s' for writing.\n",pathName[fileIndex]); - return 2; - } + //if ( Config->run_number ) { + // // Tell DB that the process has started + // printf("- Setting process status to RUNNING (%d) in DB\n",DB_STATUS_RUNNING); + // db_process_set_status(Config->process_id,DB_STATUS_RUNNING); + // if ( db_process_open(Config->process_id,t_daqstart) != DB_OK ) return 2; + //} + printf("- Setting process status to RUNNING (%d)\n",DB_STATUS_RUNNING); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_RUNNING); - */ + // Register the output file in the DB and send it the header fileTOpen[fileIndex] = t_daqstart; fileSize[fileIndex] = 0; fileEvents[fileIndex] = 0; // Register file in the DB - if ( Config->run_number && strcmp(Config->output_mode,"FILE")==0 ) { - if ( db_file_open(fileName[fileIndex],PEVT_CURRENT_VERSION,fileTOpen[fileIndex],Config->process_id,fileIndex) != DB_OK ) return 2; + //if ( Config->run_number && strcmp(Config->output_mode,"FILE")==0 ) { + // if ( 
db_file_open(fileName[fileIndex],PEVT_CURRENT_VERSION,fileTOpen[fileIndex],Config->process_id,fileIndex) != DB_OK ) return 2; + //} + if ( strcmp(Config->output_mode,"FILE")==0 ) { + printf("DBINFO - file_create %s %s %d %d %d\n",fileName[fileIndex],"DAQDATA",PEVT_CURRENT_VERSION,Config->process_id,fileIndex); + printf("DBINFO - file_set_time_open %s %s\n",fileName[fileIndex],format_time(fileTOpen[fileIndex])); } // Write header to file @@ -457,9 +427,12 @@ int ZSUP_readdata () fileEvents[fileIndex],fileSize[fileIndex]); // Close file in DB - if ( Config->run_number ) { - if ( db_file_close(fileName[fileIndex],fileTClose[fileIndex],fileSize[fileIndex],fileEvents[fileIndex]) != DB_OK ) return 2; - } + //if ( Config->run_number ) { + // if ( db_file_close(fileName[fileIndex],fileTClose[fileIndex],fileSize[fileIndex],fileEvents[fileIndex]) != DB_OK ) return 2; + //} + printf("DBINFO - file_set_time_close %s %s\n",fileName[fileIndex],format_time(fileTClose[fileIndex])); + printf("DBINFO - file_set_size %s %lu\n",fileName[fileIndex],fileSize[fileIndex]); + printf("DBINFO - file_set_n_events %s %u\n",fileName[fileIndex],fileEvents[fileIndex]); // Update file counter fileIndex++; @@ -470,16 +443,16 @@ int ZSUP_readdata () generate_filename(tmpName,t_now); fileName[fileIndex] = (char*)malloc(strlen(tmpName)+1); strcpy(fileName[fileIndex],tmpName); - if ( Config->run_number ) { - rc = db_file_check(fileName[fileIndex]); - if ( rc < 0 ) { - printf("ERROR: DB check for file %s returned an error\n",fileName[fileIndex]); - return 2; - } else if ( rc == 1 ) { - printf("ERROR: file %s already exists in the DB\n",fileName[fileIndex]); - return 2; - } - } + //if ( Config->run_number ) { + // rc = db_file_check(fileName[fileIndex]); + // if ( rc < 0 ) { + // printf("ERROR: DB check for file %s returned an error\n",fileName[fileIndex]); + // return 2; + // } else if ( rc == 1 ) { + // printf("ERROR: file %s already exists in the DB\n",fileName[fileIndex]); + // return 2; + // } + //} 
pathName[fileIndex] = (char*)malloc(strlen(Config->data_dir)+strlen(fileName[fileIndex])+1); strcpy(pathName[fileIndex],Config->data_dir); strcat(pathName[fileIndex],fileName[fileIndex]); @@ -494,9 +467,11 @@ int ZSUP_readdata () fileEvents[fileIndex] = 0; // Register file in the DB - if ( Config->run_number ) { - if ( db_file_open(fileName[fileIndex],PEVT_CURRENT_VERSION,fileTOpen[fileIndex],Config->process_id,fileIndex) != DB_OK ) return 2; - } + //if ( Config->run_number ) { + // if ( db_file_open(fileName[fileIndex],PEVT_CURRENT_VERSION,fileTOpen[fileIndex],Config->process_id,fileIndex) != DB_OK ) return 2; + //} + printf("DBINFO - file_create %s %s %d %d %d\n",fileName[fileIndex],"DAQDATA",PEVT_CURRENT_VERSION,Config->process_id,fileIndex); + printf("DBINFO - file_set_time_open %s %s\n",fileName[fileIndex],format_time(fileTOpen[fileIndex])); // Write header to file fHeadSize = create_file_head(fileIndex,run_number,board_id,board_sn,fileTOpen[fileIndex],(void *)outEvtBuffer); @@ -507,6 +482,7 @@ int ZSUP_readdata () return 2; } fileSize[fileIndex] += writeSize; + totalWriteSize += writeSize; } else { @@ -553,6 +529,7 @@ int ZSUP_readdata () return 2; } fileSize[fileIndex] += writeSize; + totalWriteSize += writeSize; // Close output file and show some info about counters if (close(outFileHandle) == -1) { @@ -563,9 +540,12 @@ int ZSUP_readdata () printf("%s - Closed output file '%s' after %d secs with %u events and size %lu bytes\n", format_time(t_now),pathName[fileIndex],(int)(fileTClose[fileIndex]-fileTOpen[fileIndex]), fileEvents[fileIndex],fileSize[fileIndex]); - if ( Config->run_number ) { - if ( db_file_close(fileName[fileIndex],fileTClose[fileIndex],fileSize[fileIndex],fileEvents[fileIndex]) != DB_OK ) return 2; - } + //if ( Config->run_number ) { + // if ( db_file_close(fileName[fileIndex],fileTClose[fileIndex],fileSize[fileIndex],fileEvents[fileIndex]) != DB_OK ) return 2; + //} + printf("DBINFO - file_set_time_close %s 
%s\n",fileName[fileIndex],format_time(fileTClose[fileIndex])); + printf("DBINFO - file_set_size %s %lu\n",fileName[fileIndex],fileSize[fileIndex]); + printf("DBINFO - file_set_n_events %s %u\n",fileName[fileIndex],fileEvents[fileIndex]); } else { printf("%s - Closed output stream '%s' after %d secs with %u events and size %lu bytes\n", format_time(t_now),pathName[fileIndex],(int)(fileTClose[fileIndex]-fileTOpen[fileIndex]), @@ -583,6 +563,19 @@ int ZSUP_readdata () // Deallocate input/output event buffer free(inEvtBuffer); free(outEvtBuffer); + printf("- Deallocated I/O event buffer\n"); + + // Tell DB that the process has ended + //if ( Config->run_number ) { + // //if ( db_process_close(Config->process_id,t_daqstop) != DB_OK ) return 2; + // if ( db_process_close(Config->process_id,t_daqstop,totalWriteSize,totalWriteEvents) != DB_OK ) return 2; + //} + printf("- Setting process status to FINISHED (%d)\n",DB_STATUS_FINISHED); + printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); + printf("DBINFO - process_set_time_stop %d %s\n",Config->process_id,format_time(t_daqstop)); + printf("DBINFO - process_set_n_files %d %d\n",Config->process_id,fileIndex); + printf("DBINFO - process_set_total_events %d %d\n",Config->process_id,totalWriteEvents); + printf("DBINFO - process_set_total_size %d %ld\n",Config->process_id,totalWriteSize); // Give some final report evtReadPerSec = 0.; @@ -612,17 +605,14 @@ int ZSUP_readdata () } printf("=========================================================\n"); - // Tell DB that the process has ended - if ( Config->run_number ) { - //if ( db_process_close(Config->process_id,t_daqstop) != DB_OK ) return 2; - if ( db_process_close(Config->process_id,t_daqstop,totalWriteSize,totalWriteEvents) != DB_OK ) return 2; - } - // Close DB file //if ( Config->run_number ) { // if ( db_end() != DB_OK ) return 2; //} + // Free space allocated for file names + for(i=0;i Date: Wed, 27 Nov 2019 09:46:54 +0100 Subject: [PATCH 
25/64] PadmeDAQ: fixed handling of process status setting --- PadmeDAQ/src/DAQ.c | 4 ++-- PadmeDAQ/src/ZSUP.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/PadmeDAQ/src/DAQ.c b/PadmeDAQ/src/DAQ.c index a0d274f3..a5c5c91b 100644 --- a/PadmeDAQ/src/DAQ.c +++ b/PadmeDAQ/src/DAQ.c @@ -1264,8 +1264,8 @@ int DAQ_readdata () // //if ( db_process_close(Config->process_id,t_daqstop) != DB_OK ) return 2; // if ( db_process_close(Config->process_id,t_daqstop,totalWriteSize,totalWriteEvents) != DB_OK ) return 2; //} - printf("- Setting process status to FINISHED (%d)\n",DB_STATUS_FINISHED); - printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); + //printf("- Setting process status to FINISHED (%d)\n",DB_STATUS_FINISHED); + //printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); printf("DBINFO - process_set_time_stop %d %s\n",Config->process_id,format_time(t_daqstop)); printf("DBINFO - process_set_n_files %d %d\n",Config->process_id,fileIndex); printf("DBINFO - process_set_total_events %d %d\n",Config->process_id,totalWriteEvents); diff --git a/PadmeDAQ/src/ZSUP.c b/PadmeDAQ/src/ZSUP.c index 26b72172..dfad7220 100644 --- a/PadmeDAQ/src/ZSUP.c +++ b/PadmeDAQ/src/ZSUP.c @@ -570,8 +570,8 @@ int ZSUP_readdata () // //if ( db_process_close(Config->process_id,t_daqstop) != DB_OK ) return 2; // if ( db_process_close(Config->process_id,t_daqstop,totalWriteSize,totalWriteEvents) != DB_OK ) return 2; //} - printf("- Setting process status to FINISHED (%d)\n",DB_STATUS_FINISHED); - printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); + //printf("- Setting process status to FINISHED (%d)\n",DB_STATUS_FINISHED); + //printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); printf("DBINFO - process_set_time_stop %d %s\n",Config->process_id,format_time(t_daqstop)); printf("DBINFO - process_set_n_files %d %d\n",Config->process_id,fileIndex); 
printf("DBINFO - process_set_total_events %d %d\n",Config->process_id,totalWriteEvents); From 21b68f9f281fd0a21072fa30f374b1792d89e6b1 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Wed, 27 Nov 2019 10:26:53 +0100 Subject: [PATCH 26/64] RunControl: fixed parameters population in DB --- RunControl/code/ADCBoard.py | 142 ++++++++++++++++++------------------ RunControl/code/Level1.py | 26 +++---- RunControl/code/Merger.py | 22 +++--- RunControl/code/Trigger.py | 97 ++++++++++++------------ 4 files changed, 144 insertions(+), 143 deletions(-) diff --git a/RunControl/code/ADCBoard.py b/RunControl/code/ADCBoard.py index 587b905d..f5e0e636 100644 --- a/RunControl/code/ADCBoard.py +++ b/RunControl/code/ADCBoard.py @@ -334,63 +334,63 @@ def create_proc_daq(self): # Add info about optical link self.db.add_daq_process_optical_link(self.proc_daq_id,self.node_id,self.conet2_link,self.conet2_slot) - self.db.add_cfg_para_daq(self.proc_daq_id,"daq_dir", self.daq_dir) - self.db.add_cfg_para_daq(self.proc_daq_id,"ssh_id_file", self.ssh_id_file) - self.db.add_cfg_para_daq(self.proc_daq_id,"executable", self.executable) - self.db.add_cfg_para_daq(self.proc_daq_id,"start_file", self.start_file) - self.db.add_cfg_para_daq(self.proc_daq_id,"quit_file", self.quit_file) + self.db.add_cfg_para_proc(self.proc_daq_id,"daq_dir", self.daq_dir) + self.db.add_cfg_para_proc(self.proc_daq_id,"ssh_id_file", self.ssh_id_file) + self.db.add_cfg_para_proc(self.proc_daq_id,"executable", self.executable) + self.db.add_cfg_para_proc(self.proc_daq_id,"start_file", self.start_file) + self.db.add_cfg_para_proc(self.proc_daq_id,"quit_file", self.quit_file) - self.db.add_cfg_para_daq(self.proc_daq_id,"run_number", repr(self.run_number)) - self.db.add_cfg_para_daq(self.proc_daq_id,"board_id", repr(self.board_id)) - self.db.add_cfg_para_daq(self.proc_daq_id,"process_mode", self.process_mode) + #self.db.add_cfg_para_proc(self.proc_daq_id,"run_number", repr(self.run_number)) + 
self.db.add_cfg_para_proc(self.proc_daq_id,"board_id", repr(self.board_id)) + self.db.add_cfg_para_proc(self.proc_daq_id,"process_mode", self.process_mode) - self.db.add_cfg_para_daq(self.proc_daq_id,"node_id", repr(self.node_id)) - self.db.add_cfg_para_daq(self.proc_daq_id,"node_ip", self.node_ip) - self.db.add_cfg_para_daq(self.proc_daq_id,"conet2_link", repr(self.conet2_link)) - self.db.add_cfg_para_daq(self.proc_daq_id,"conet2_slot", repr(self.conet2_slot)) + #self.db.add_cfg_para_proc(self.proc_daq_id,"node_id", repr(self.node_id)) + self.db.add_cfg_para_proc(self.proc_daq_id,"node_ip", self.node_ip) + self.db.add_cfg_para_proc(self.proc_daq_id,"conet2_link", repr(self.conet2_link)) + self.db.add_cfg_para_proc(self.proc_daq_id,"conet2_slot", repr(self.conet2_slot)) - self.db.add_cfg_para_daq(self.proc_daq_id,"config_file", self.config_file_daq) - self.db.add_cfg_para_daq(self.proc_daq_id,"log_file", self.log_file_daq) - self.db.add_cfg_para_daq(self.proc_daq_id,"lock_file", self.lock_file_daq) - self.db.add_cfg_para_daq(self.proc_daq_id,"initok_file", self.initok_file_daq) - self.db.add_cfg_para_daq(self.proc_daq_id,"initfail_file", self.initfail_file_daq) + self.db.add_cfg_para_proc(self.proc_daq_id,"config_file", self.config_file_daq) + self.db.add_cfg_para_proc(self.proc_daq_id,"log_file", self.log_file_daq) + self.db.add_cfg_para_proc(self.proc_daq_id,"lock_file", self.lock_file_daq) + self.db.add_cfg_para_proc(self.proc_daq_id,"initok_file", self.initok_file_daq) + self.db.add_cfg_para_proc(self.proc_daq_id,"initfail_file", self.initfail_file_daq) - self.db.add_cfg_para_daq(self.proc_daq_id,"output_mode", self.output_mode_daq) + self.db.add_cfg_para_proc(self.proc_daq_id,"output_mode", self.output_mode_daq) if self.output_mode_daq == "STREAM": - self.db.add_cfg_para_daq(self.proc_daq_id,"output_stream", self.output_stream_daq) + self.db.add_cfg_para_proc(self.proc_daq_id,"output_stream", self.output_stream_daq) elif self.output_mode_daq == "FILE": - 
self.db.add_cfg_para_trigger(self.proc_daq_id,"data_dir", self.data_dir_daq) - self.db.add_cfg_para_trigger(self.proc_daq_id,"data_file", self.data_file_daq) - self.db.add_cfg_para_trigger(self.proc_daq_id,"file_max_duration", self.file_max_duration) - self.db.add_cfg_para_trigger(self.proc_daq_id,"file_max_size", self.file_max_size) - self.db.add_cfg_para_trigger(self.proc_daq_id,"file_max_events", self.file_max_events) + self.db.add_cfg_para_proc(self.proc_daq_id,"data_dir", self.data_dir_daq) + self.db.add_cfg_para_proc(self.proc_daq_id,"data_file", self.data_file_daq) + self.db.add_cfg_para_proc(self.proc_daq_id,"file_max_duration", self.file_max_duration) + self.db.add_cfg_para_proc(self.proc_daq_id,"file_max_size", self.file_max_size) + self.db.add_cfg_para_proc(self.proc_daq_id,"file_max_events", self.file_max_events) - self.db.add_cfg_para_daq(self.proc_daq_id,"total_daq_time", repr(self.total_daq_time)) + self.db.add_cfg_para_proc(self.proc_daq_id,"total_daq_time", repr(self.total_daq_time)) - self.db.add_cfg_para_daq(self.proc_daq_id,"startdaq_mode", repr(self.startdaq_mode)) - self.db.add_cfg_para_daq(self.proc_daq_id,"trigger_mode", repr(self.trigger_mode)) - self.db.add_cfg_para_daq(self.proc_daq_id,"trigger_iolevel", self.trigger_iolevel) + self.db.add_cfg_para_proc(self.proc_daq_id,"startdaq_mode", repr(self.startdaq_mode)) + self.db.add_cfg_para_proc(self.proc_daq_id,"trigger_mode", repr(self.trigger_mode)) + self.db.add_cfg_para_proc(self.proc_daq_id,"trigger_iolevel", self.trigger_iolevel) - self.db.add_cfg_para_daq(self.proc_daq_id,"group_enable_mask", "%#1x"%self.group_enable_mask) - self.db.add_cfg_para_daq(self.proc_daq_id,"channel_enable_mask","%#08x"%self.channel_enable_mask) + self.db.add_cfg_para_proc(self.proc_daq_id,"group_enable_mask", "%#1x"%self.group_enable_mask) + self.db.add_cfg_para_proc(self.proc_daq_id,"channel_enable_mask","%#08x"%self.channel_enable_mask) - self.db.add_cfg_para_daq(self.proc_daq_id,"offset_global", 
"%#04x"%self.proc_daq_id) + self.db.add_cfg_para_proc(self.proc_daq_id,"offset_global", "%#04x"%self.proc_daq_id) for ch in range(32): if (self.offset_ch[ch] != self.offset_global): - self.db.add_cfg_para_daq(self.proc_daq_id,"offset_ch", "%d %#04x"%(ch,self.offset_ch[ch])) + self.db.add_cfg_para_proc(self.proc_daq_id,"offset_ch", "%d %#04x"%(ch,self.offset_ch[ch])) - self.db.add_cfg_para_daq(self.proc_daq_id,"post_trigger_size", repr(self.post_trigger_size)) - self.db.add_cfg_para_daq(self.proc_daq_id,"max_num_events_blt", repr(self.max_num_events_blt)) + self.db.add_cfg_para_proc(self.proc_daq_id,"post_trigger_size", repr(self.post_trigger_size)) + self.db.add_cfg_para_proc(self.proc_daq_id,"max_num_events_blt", repr(self.max_num_events_blt)) - self.db.add_cfg_para_daq(self.proc_daq_id,"drs4corr_enable", repr(self.drs4corr_enable)) - self.db.add_cfg_para_daq(self.proc_daq_id,"drs4_sampfreq", repr(self.drs4_sampfreq)) + self.db.add_cfg_para_proc(self.proc_daq_id,"drs4corr_enable", repr(self.drs4corr_enable)) + self.db.add_cfg_para_proc(self.proc_daq_id,"drs4_sampfreq", repr(self.drs4_sampfreq)) - self.db.add_cfg_para_daq(self.proc_daq_id,"auto_threshold", "%#04x"%self.auto_threshold) - self.db.add_cfg_para_daq(self.proc_daq_id,"auto_duration", repr(self.auto_duration)) + self.db.add_cfg_para_proc(self.proc_daq_id,"auto_threshold", "%#04x"%self.auto_threshold) + self.db.add_cfg_para_proc(self.proc_daq_id,"auto_duration", repr(self.auto_duration)) - self.db.add_cfg_para_daq(self.proc_daq_id,"daq_loop_delay", repr(self.daq_loop_delay)) + self.db.add_cfg_para_proc(self.proc_daq_id,"daq_loop_delay", repr(self.daq_loop_delay)) - self.db.add_cfg_para_daq(self.proc_daq_id,"debug_scale", repr(self.debug_scale)) + self.db.add_cfg_para_proc(self.proc_daq_id,"debug_scale", repr(self.debug_scale)) return "ok" @@ -402,50 +402,50 @@ def create_proc_zsup(self): print "ADCBoard::create_proc_zsup - ERROR: unable to create new ZSUP proces in DB" return "error" - 
self.db.add_cfg_para_daq(self.proc_zsup_id,"daq_dir", self.daq_dir) - self.db.add_cfg_para_daq(self.proc_zsup_id,"ssh_id_file", self.ssh_id_file) - self.db.add_cfg_para_daq(self.proc_zsup_id,"executable", self.executable) + self.db.add_cfg_para_proc(self.proc_zsup_id,"daq_dir", self.daq_dir) + self.db.add_cfg_para_proc(self.proc_zsup_id,"ssh_id_file", self.ssh_id_file) + self.db.add_cfg_para_proc(self.proc_zsup_id,"executable", self.executable) - self.db.add_cfg_para_daq(self.proc_zsup_id,"run_number", repr(self.run_number)) - self.db.add_cfg_para_daq(self.proc_zsup_id,"board_id", repr(self.board_id)) - self.db.add_cfg_para_daq(self.proc_zsup_id,"process_mode", "ZSUP") + #self.db.add_cfg_para_proc(self.proc_zsup_id,"run_number", repr(self.run_number)) + self.db.add_cfg_para_proc(self.proc_zsup_id,"board_id", repr(self.board_id)) + self.db.add_cfg_para_proc(self.proc_zsup_id,"process_mode", "ZSUP") - self.db.add_cfg_para_daq(self.proc_zsup_id,"node_id", repr(self.node_id)) - self.db.add_cfg_para_daq(self.proc_zsup_id,"node_ip", self.node_ip) + #self.db.add_cfg_para_proc(self.proc_zsup_id,"node_id", repr(self.node_id)) + self.db.add_cfg_para_proc(self.proc_zsup_id,"node_ip", self.node_ip) - self.db.add_cfg_para_daq(self.proc_zsup_id,"config_file", self.config_file_zsup) - self.db.add_cfg_para_daq(self.proc_zsup_id,"log_file", self.log_file_zsup) - self.db.add_cfg_para_daq(self.proc_zsup_id,"lock_file", self.lock_file_zsup) - self.db.add_cfg_para_daq(self.proc_zsup_id,"initok_file", self.initok_file_zsup) - self.db.add_cfg_para_daq(self.proc_zsup_id,"initfail_file", self.initfail_file_zsup) + self.db.add_cfg_para_proc(self.proc_zsup_id,"config_file", self.config_file_zsup) + self.db.add_cfg_para_proc(self.proc_zsup_id,"log_file", self.log_file_zsup) + self.db.add_cfg_para_proc(self.proc_zsup_id,"lock_file", self.lock_file_zsup) + self.db.add_cfg_para_proc(self.proc_zsup_id,"initok_file", self.initok_file_zsup) + 
self.db.add_cfg_para_proc(self.proc_zsup_id,"initfail_file", self.initfail_file_zsup) - self.db.add_cfg_para_daq(self.proc_zsup_id,"output_mode", self.output_mode_zsup) + self.db.add_cfg_para_proc(self.proc_zsup_id,"output_mode", self.output_mode_zsup) if self.output_mode_zsup == "STREAM": - self.db.add_cfg_para_daq(self.proc_zsup_id,"output_stream", self.output_stream_zsup) + self.db.add_cfg_para_proc(self.proc_zsup_id,"output_stream", self.output_stream_zsup) elif self.output_mode_zsup == "FILE": - self.db.add_cfg_para_trigger(self.proc_zsup_id,"data_dir", self.data_dir_zsup) - self.db.add_cfg_para_trigger(self.proc_zsup_id,"data_file", self.data_file_zsup) - self.db.add_cfg_para_trigger(self.proc_zsup_id,"file_max_duration", self.file_max_duration) - self.db.add_cfg_para_trigger(self.proc_zsup_id,"file_max_size", self.file_max_size) - self.db.add_cfg_para_trigger(self.proc_zsup_id,"file_max_events", self.file_max_events) + self.db.add_cfg_para_proc(self.proc_zsup_id,"data_dir", self.data_dir_zsup) + self.db.add_cfg_para_proc(self.proc_zsup_id,"data_file", self.data_file_zsup) + self.db.add_cfg_para_proc(self.proc_zsup_id,"file_max_duration", self.file_max_duration) + self.db.add_cfg_para_proc(self.proc_zsup_id,"file_max_size", self.file_max_size) + self.db.add_cfg_para_proc(self.proc_zsup_id,"file_max_events", self.file_max_events) - self.db.add_cfg_para_daq(self.proc_zsup_id,"input_stream", self.input_stream_zsup) + self.db.add_cfg_para_proc(self.proc_zsup_id,"input_stream", self.input_stream_zsup) - self.db.add_cfg_para_daq(self.proc_zsup_id,"zero_suppression", repr(self.zero_suppression)) + self.db.add_cfg_para_proc(self.proc_zsup_id,"zero_suppression", repr(self.zero_suppression)) if (self.zero_suppression%100 == 1): - self.db.add_cfg_para_daq(self.proc_zsup_id,"zs1_head", repr(self.zs1_head)) - self.db.add_cfg_para_daq(self.proc_zsup_id,"zs1_tail", repr(self.zs1_tail)) - self.db.add_cfg_para_daq(self.proc_zsup_id,"zs1_nsigma", repr(self.zs1_nsigma)) - 
self.db.add_cfg_para_daq(self.proc_zsup_id,"zs1_nabovethr", repr(self.zs1_nabovethr)) - self.db.add_cfg_para_daq(self.proc_zsup_id,"zs1_badrmsthr", repr(self.zs1_badrmsthr)) + self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_head", repr(self.zs1_head)) + self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_tail", repr(self.zs1_tail)) + self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_nsigma", repr(self.zs1_nsigma)) + self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_nabovethr", repr(self.zs1_nabovethr)) + self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_badrmsthr", repr(self.zs1_badrmsthr)) elif (self.zero_suppression%100 == 2): - self.db.add_cfg_para_daq(self.proc_zsup_id,"zs2_tail", repr(self.zs2_tail)) - self.db.add_cfg_para_daq(self.proc_zsup_id,"zs2_minrms", repr(self.zs2_minrms)) + self.db.add_cfg_para_proc(self.proc_zsup_id,"zs2_tail", repr(self.zs2_tail)) + self.db.add_cfg_para_proc(self.proc_zsup_id,"zs2_minrms", repr(self.zs2_minrms)) for ch in range(32): if (self.zs2_minrms_ch[ch] != self.zs2_minrms): - self.db.add_cfg_para_daq(self.proc_zsup_id,"zs2_minrms_ch", "%d %d"%(ch,self.zs2_minrms_ch[ch])) + self.db.add_cfg_para_proc(self.proc_zsup_id,"zs2_minrms_ch","%d %d"%(ch,self.zs2_minrms_ch[ch])) - self.db.add_cfg_para_daq(self.proc_zsup_id,"debug_scale", repr(self.debug_scale)) + self.db.add_cfg_para_procself.proc_zsup_id,"debug_scale", repr(self.debug_scale)) return "ok" diff --git a/RunControl/code/Level1.py b/RunControl/code/Level1.py index 9a5c97db..a607c17d 100644 --- a/RunControl/code/Level1.py +++ b/RunControl/code/Level1.py @@ -86,24 +86,24 @@ def create_level1(self): print "Level1::create_level1 - ERROR: unable to create new Level1 process in DB" return "error" - self.db.add_cfg_para_level1(self.process_id,"daq_dir", self.daq_dir) - self.db.add_cfg_para_level1(self.process_id,"ssh_id_file", self.ssh_id_file) - self.db.add_cfg_para_level1(self.process_id,"executable", self.executable) + self.db.add_cfg_para_proc(self.process_id,"daq_dir", 
self.daq_dir) + self.db.add_cfg_para_proc(self.process_id,"ssh_id_file", self.ssh_id_file) + self.db.add_cfg_para_proc(self.process_id,"executable", self.executable) - #self.db.add_cfg_para_level1(self.process_id,"run_number", repr(self.run_number)) - self.db.add_cfg_para_level1(self.process_id,"level1_id", repr(self.level1_id)) + #self.db.add_cfg_para_proc(self.process_id,"run_number", repr(self.run_number)) + self.db.add_cfg_para_proc(self.process_id,"level1_id", repr(self.level1_id)) - #self.db.add_cfg_para_level1(self.process_id,"node_id", repr(self.node_id)) - self.db.add_cfg_para_level1(self.process_id,"node_ip", self.node_ip) + #self.db.add_cfg_para_proc(self.process_id,"node_id", repr(self.node_id)) + self.db.add_cfg_para_proc(self.process_id,"node_ip", self.node_ip) - self.db.add_cfg_para_level1(self.process_id,"config_file", self.config_file) - self.db.add_cfg_para_level1(self.process_id,"log_file", self.log_file) + self.db.add_cfg_para_proc(self.process_id,"config_file", self.config_file) + self.db.add_cfg_para_proc(self.process_id,"log_file", self.log_file) - self.db.add_cfg_para_level1(self.process_id,"input_stream", self.input_stream) - self.db.add_cfg_para_level1(self.process_id,"output_dir", self.output_dir) - self.db.add_cfg_para_level1(self.process_id,"output_header",self.output_header) + self.db.add_cfg_para_proc(self.process_id,"input_stream", self.input_stream) + self.db.add_cfg_para_proc(self.process_id,"output_dir", self.output_dir) + self.db.add_cfg_para_proc(self.process_id,"output_header",self.output_header) - self.db.add_cfg_para_level1(self.process_id,"max_events", repr(self.max_events)) + self.db.add_cfg_para_proc(self.process_id,"max_events", repr(self.max_events)) return "ok" diff --git a/RunControl/code/Merger.py b/RunControl/code/Merger.py index 3f5bb137..d01175cc 100644 --- a/RunControl/code/Merger.py +++ b/RunControl/code/Merger.py @@ -83,22 +83,22 @@ def create_merger(self): print "Merger::create_merger - ERROR: unable to create 
new Merger process in DB" return "error" - self.db.add_cfg_para_merger(self.process_id,"daq_dir", self.daq_dir) - self.db.add_cfg_para_merger(self.process_id,"ssh_id_file",self.ssh_id_file) - self.db.add_cfg_para_merger(self.process_id,"executable", self.executable) + self.db.add_cfg_para_proc(self.process_id,"daq_dir", self.daq_dir) + self.db.add_cfg_para_proc(self.process_id,"ssh_id_file",self.ssh_id_file) + self.db.add_cfg_para_proc(self.process_id,"executable", self.executable) - #self.db.add_cfg_para_merger(self.process_id,"run_number", repr(self.run_number)) + #self.db.add_cfg_para_proc(self.process_id,"run_number", repr(self.run_number)) - #self.db.add_cfg_para_merger(self.process_id,"node_id", repr(self.node_id)) - self.db.add_cfg_para_merger(self.process_id,"node_ip", self.node_ip) + #self.db.add_cfg_para_proc(self.process_id,"node_id", repr(self.node_id)) + self.db.add_cfg_para_proc(self.process_id,"node_ip", self.node_ip) - self.db.add_cfg_para_merger(self.process_id,"config_file",self.config_file) - self.db.add_cfg_para_merger(self.process_id,"log_file", self.log_file) + self.db.add_cfg_para_proc(self.process_id,"config_file",self.config_file) + self.db.add_cfg_para_proc(self.process_id,"log_file", self.log_file) - #self.db.add_cfg_para_merger(self.process_id,"output_mode",self.output_mode) + #self.db.add_cfg_para_proc(self.process_id,"output_mode",self.output_mode) - self.db.add_cfg_para_merger(self.process_id,"input_list", self.input_list) - self.db.add_cfg_para_merger(self.process_id,"output_list",self.output_list) + self.db.add_cfg_para_proc(self.process_id,"input_list", self.input_list) + self.db.add_cfg_para_proc(self.process_id,"output_list",self.output_list) return "ok" diff --git a/RunControl/code/Trigger.py b/RunControl/code/Trigger.py index edd8adc7..e51d400d 100644 --- a/RunControl/code/Trigger.py +++ b/RunControl/code/Trigger.py @@ -259,83 +259,84 @@ def create_trigger(self): print "Trigger::create_trigger - ERROR: unable to create new 
Trigger process in DB" return "error" - self.db.add_cfg_para_trigger(self.process_id,"daq_dir", self.daq_dir) - self.db.add_cfg_para_trigger(self.process_id,"ssh_id_file", self.ssh_id_file) - self.db.add_cfg_para_trigger(self.process_id,"executable", self.executable) - self.db.add_cfg_para_trigger(self.process_id,"start_file", self.start_file) - self.db.add_cfg_para_trigger(self.process_id,"quit_file", self.quit_file) + self.db.add_cfg_para_proc(self.process_id,"daq_dir", self.daq_dir) + self.db.add_cfg_para_proc(self.process_id,"ssh_id_file", self.ssh_id_file) + self.db.add_cfg_para_proc(self.process_id,"executable", self.executable) + self.db.add_cfg_para_proc(self.process_id,"start_file", self.start_file) + self.db.add_cfg_para_proc(self.process_id,"quit_file", self.quit_file) - #self.db.add_cfg_para_trigger(self.process_id,"run_number", repr(self.run_number)) - #self.db.add_cfg_para_trigger(self.process_id,"process_mode", self.process_mode) + #self.db.add_cfg_para_proc(self.process_id,"run_number", repr(self.run_number)) + #self.db.add_cfg_para_proc(self.process_id,"process_mode", self.process_mode) - #self.db.add_cfg_para_trigger(self.process_id,"node_id", repr(self.node_id)) - self.db.add_cfg_para_trigger(self.process_id,"node_ip", self.node_ip) + #self.db.add_cfg_para_proc(self.process_id,"node_id", repr(self.node_id)) + self.db.add_cfg_para_proc(self.process_id,"node_ip", self.node_ip) - self.db.add_cfg_para_trigger(self.process_id,"config_file", self.config_file) - self.db.add_cfg_para_trigger(self.process_id,"log_file", self.log_file) - self.db.add_cfg_para_trigger(self.process_id,"lock_file", self.lock_file) - self.db.add_cfg_para_trigger(self.process_id,"initok_file", self.initok_file) - self.db.add_cfg_para_trigger(self.process_id,"initfail_file", self.initfail_file) + self.db.add_cfg_para_proc(self.process_id,"config_file", self.config_file) + self.db.add_cfg_para_proc(self.process_id,"log_file", self.log_file) + 
self.db.add_cfg_para_proc(self.process_id,"lock_file", self.lock_file) + self.db.add_cfg_para_proc(self.process_id,"initok_file", self.initok_file) + self.db.add_cfg_para_proc(self.process_id,"initfail_file", self.initfail_file) - self.db.add_cfg_para_trigger(self.process_id,"output_mode", self.output_mode) + self.db.add_cfg_para_proc(self.process_id,"output_mode", self.output_mode) if self.output_mode == "STREAM": - self.db.add_cfg_para_trigger(self.process_id,"output_stream", self.output_stream) + self.db.add_cfg_para_proc(self.process_id,"output_stream", self.output_stream) elif self.output_mode == "FILE": - self.db.add_cfg_para_trigger(self.process_id,"data_dir", self.data_dir) - self.db.add_cfg_para_trigger(self.process_id,"data_file", self.data_file) - self.db.add_cfg_para_trigger(self.process_id,"file_max_duration", self.file_max_duration) - self.db.add_cfg_para_trigger(self.process_id,"file_max_size", self.file_max_size) - self.db.add_cfg_para_trigger(self.process_id,"file_max_events", self.file_max_events) + self.db.add_cfg_para_proc(self.process_id,"data_dir", self.data_dir) + self.db.add_cfg_para_proc(self.process_id,"data_file", self.data_file) + self.db.add_cfg_para_proc(self.process_id,"file_max_duration", self.file_max_duration) + self.db.add_cfg_para_proc(self.process_id,"file_max_size", self.file_max_size) + self.db.add_cfg_para_proc(self.process_id,"file_max_events", self.file_max_events) - self.db.add_cfg_para_trigger(self.process_id,"total_daq_time", repr(self.total_daq_time)) + self.db.add_cfg_para_proc(self.process_id,"total_daq_time", repr(self.total_daq_time)) - self.db.add_cfg_para_trigger(self.process_id,"trigger_addr", self.trigger_addr) - self.db.add_cfg_para_trigger(self.process_id,"trigger_port", repr(self.trigger_port)) + self.db.add_cfg_para_proc(self.process_id,"trigger_addr", self.trigger_addr) + self.db.add_cfg_para_proc(self.process_id,"trigger_port", repr(self.trigger_port)) - 
self.db.add_cfg_para_trigger(self.process_id,"trigger_mask", "%#02x"%self.trigger_mask) - self.db.add_cfg_para_trigger(self.process_id,"busy_mask", "%#02x"%self.busy_mask) + self.db.add_cfg_para_proc(self.process_id,"trigger_mask", "%#02x"%self.trigger_mask) + self.db.add_cfg_para_proc(self.process_id,"busy_mask", "%#02x"%self.busy_mask) - self.db.add_cfg_para_trigger(self.process_id,"trigger0_delay", "%#02x"%self.trigger0_delay) + self.db.add_cfg_para_proc(self.process_id,"trigger0_delay", "%#02x"%self.trigger0_delay) + + self.db.add_cfg_para_proc(self.process_id,"correlated_trigger_delay", "%#04x"%self.correlated_trigger_delay) - self.db.add_cfg_para_trigger(self.process_id,"correlated_trigger_delay", "%#04x"%self.correlated_trigger_delay) if (self.trigger_mask & 0x01): - #self.db.add_cfg_para_trigger(self.process_id,"trig0_scale_global", repr(self.trig0_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig0_scale_autopass", repr(self.trig0_scale_autopass)) + #self.db.add_cfg_para_proc(self.process_id,"trig0_scale_global", repr(self.trig0_scale_global)) + self.db.add_cfg_para_proc(self.process_id,"trig0_scale_autopass", repr(self.trig0_scale_autopass)) if (self.trigger_mask & 0x02): - self.db.add_cfg_para_trigger(self.process_id,"trig1_scale_global", repr(self.trig1_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig1_scale_autopass", repr(self.trig1_scale_autopass)) + self.db.add_cfg_para_proc(self.process_id,"trig1_scale_global", repr(self.trig1_scale_global)) + self.db.add_cfg_para_proc(self.process_id,"trig1_scale_autopass", repr(self.trig1_scale_autopass)) if (self.trigger_mask & 0x04): - self.db.add_cfg_para_trigger(self.process_id,"trig2_scale_global", repr(self.trig2_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig2_scale_autopass", repr(self.trig2_scale_autopass)) + self.db.add_cfg_para_proc(self.process_id,"trig2_scale_global", repr(self.trig2_scale_global)) + 
self.db.add_cfg_para_proc(self.process_id,"trig2_scale_autopass", repr(self.trig2_scale_autopass)) if (self.trigger_mask & 0x08): - self.db.add_cfg_para_trigger(self.process_id,"trig3_scale_global", repr(self.trig3_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig3_scale_autopass", repr(self.trig3_scale_autopass)) + self.db.add_cfg_para_proc(self.process_id,"trig3_scale_global", repr(self.trig3_scale_global)) + self.db.add_cfg_para_proc(self.process_id,"trig3_scale_autopass", repr(self.trig3_scale_autopass)) if (self.trigger_mask & 0x10): - self.db.add_cfg_para_trigger(self.process_id,"trig4_scale_global", repr(self.trig4_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig4_scale_autopass", repr(self.trig4_scale_autopass)) + self.db.add_cfg_para_proc(self.process_id,"trig4_scale_global", repr(self.trig4_scale_global)) + self.db.add_cfg_para_proc(self.process_id,"trig4_scale_autopass", repr(self.trig4_scale_autopass)) if (self.trigger_mask & 0x20): - self.db.add_cfg_para_trigger(self.process_id,"trig5_scale_global", repr(self.trig5_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig5_scale_autopass", repr(self.trig5_scale_autopass)) + self.db.add_cfg_para_proc(self.process_id,"trig5_scale_global", repr(self.trig5_scale_global)) + self.db.add_cfg_para_proc(self.process_id,"trig5_scale_autopass", repr(self.trig5_scale_autopass)) if (self.trigger_mask & 0x40): - self.db.add_cfg_para_trigger(self.process_id,"trig6_scale_global", repr(self.trig6_scale_global)) - self.db.add_cfg_para_trigger(self.process_id,"trig6_scale_autopass", repr(self.trig6_scale_autopass)) + self.db.add_cfg_para_proc(self.process_id,"trig6_scale_global", repr(self.trig6_scale_global)) + self.db.add_cfg_para_proc(self.process_id,"trig6_scale_autopass", repr(self.trig6_scale_autopass)) if (self.trigger_mask & 0x80): - self.db.add_cfg_para_trigger(self.process_id,"trig7_scale_global", repr(self.trig7_scale_global)) - 
self.db.add_cfg_para_trigger(self.process_id,"trig7_scale_autopass", repr(self.trig7_scale_autopass)) + self.db.add_cfg_para_proc(self.process_id,"trig7_scale_global", repr(self.trig7_scale_global)) + self.db.add_cfg_para_proc(self.process_id,"trig7_scale_autopass", repr(self.trig7_scale_autopass)) - self.db.add_cfg_para_trigger(self.process_id,"timepix_shutter_delay", "%#02x"%self.timepix_shutter_delay) - self.db.add_cfg_para_trigger(self.process_id,"timepix_shutter_width", "%#02x"%self.timepix_shutter_width) + self.db.add_cfg_para_proc(self.process_id,"timepix_shutter_delay", "%#02x"%self.timepix_shutter_delay) + self.db.add_cfg_para_proc(self.process_id,"timepix_shutter_width", "%#02x"%self.timepix_shutter_width) - self.db.add_cfg_para_trigger(self.process_id,"daq_loop_delay", repr(self.daq_loop_delay)) + self.db.add_cfg_para_proc(self.process_id,"daq_loop_delay", repr(self.daq_loop_delay)) - self.db.add_cfg_para_trigger(self.process_id,"debug_scale", repr(self.debug_scale)) + self.db.add_cfg_para_proc(self.process_id,"debug_scale", repr(self.debug_scale)) return "ok" From 086b50992e992feac154ec3e81c873ae6db11f65 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Wed, 27 Nov 2019 14:16:43 +0100 Subject: [PATCH 27/64] PadmeDAQ,PadmeTrig: fix info sent to DB --- PadmeDAQ/src/DAQ.c | 4 +++- PadmeDAQ/src/ZSUP.c | 4 +++- PadmeTrig/src/PadmeTrig.c | 5 +++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/PadmeDAQ/src/DAQ.c b/PadmeDAQ/src/DAQ.c index a5c5c91b..0c30bb8b 100644 --- a/PadmeDAQ/src/DAQ.c +++ b/PadmeDAQ/src/DAQ.c @@ -1267,9 +1267,11 @@ int DAQ_readdata () //printf("- Setting process status to FINISHED (%d)\n",DB_STATUS_FINISHED); //printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); printf("DBINFO - process_set_time_stop %d %s\n",Config->process_id,format_time(t_daqstop)); - printf("DBINFO - process_set_n_files %d %d\n",Config->process_id,fileIndex); printf("DBINFO - process_set_total_events %d 
%d\n",Config->process_id,totalWriteEvents); printf("DBINFO - process_set_total_size %d %ld\n",Config->process_id,totalWriteSize); + if ( strcmp(Config->output_mode,"FILE")==0 ) { + printf("DBINFO - process_set_n_files %d %d\n",Config->process_id,fileIndex); + } // Give some final report evtReadPerSec = 0.; diff --git a/PadmeDAQ/src/ZSUP.c b/PadmeDAQ/src/ZSUP.c index dfad7220..fc319a51 100644 --- a/PadmeDAQ/src/ZSUP.c +++ b/PadmeDAQ/src/ZSUP.c @@ -573,9 +573,11 @@ int ZSUP_readdata () //printf("- Setting process status to FINISHED (%d)\n",DB_STATUS_FINISHED); //printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); printf("DBINFO - process_set_time_stop %d %s\n",Config->process_id,format_time(t_daqstop)); - printf("DBINFO - process_set_n_files %d %d\n",Config->process_id,fileIndex); printf("DBINFO - process_set_total_events %d %d\n",Config->process_id,totalWriteEvents); printf("DBINFO - process_set_total_size %d %ld\n",Config->process_id,totalWriteSize); + if ( strcmp(Config->output_mode,"FILE")==0 ) { + printf("DBINFO - process_set_n_files %d %d\n",Config->process_id,fileIndex); + } // Give some final report evtReadPerSec = 0.; diff --git a/PadmeTrig/src/PadmeTrig.c b/PadmeTrig/src/PadmeTrig.c index d8b4d230..77bbb760 100644 --- a/PadmeTrig/src/PadmeTrig.c +++ b/PadmeTrig/src/PadmeTrig.c @@ -1090,10 +1090,11 @@ int main(int argc, char *argv[]) { printf("- Setting process status to FINISHED (%d)\n",DB_STATUS_FINISHED); printf("DBINFO - process_set_status %d %d\n",Config->process_id,DB_STATUS_FINISHED); printf("DBINFO - process_set_time_stop %d %s\n",Config->process_id,format_time(t_daqstop)); - printf("DBINFO - process_set_n_files %d %d\n",Config->process_id,fileIndex); printf("DBINFO - process_set_total_events %d %d\n",Config->process_id,totalWriteEvents); printf("DBINFO - process_set_total_size %d %ld\n",Config->process_id,totalWriteSize); - + if ( strcmp(Config->output_mode,"FILE")==0 ) { + printf("DBINFO - process_set_n_files %d 
%d\n",Config->process_id,fileIndex); + } // Give some final report evtReadPerSec = 0.; sizeReadPerSec = 0.; From 9a4db323d195e271f608edb574a8b72695704a27 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Wed, 27 Nov 2019 14:28:35 +0100 Subject: [PATCH 28/64] PadmeLevel1: updated for new DB schema --- Level1/PadmeLevel1.cpp | 109 ++++++++++-------- Level1/PadmeMerger.cpp | 196 +++++++++++++++++++------------- Level1/include/Configuration.hh | 8 +- Level1/include/DBService.hh | 13 +++ Level1/include/RootIO.hh | 2 +- Level1/src/Configuration.cc | 12 +- Level1/src/DBService.cc | 2 +- Level1/src/RootIO.cc | 40 ++++--- 8 files changed, 229 insertions(+), 153 deletions(-) diff --git a/Level1/PadmeLevel1.cpp b/Level1/PadmeLevel1.cpp index 026c227f..f829e44f 100644 --- a/Level1/PadmeLevel1.cpp +++ b/Level1/PadmeLevel1.cpp @@ -9,6 +9,16 @@ #include "RootIO.hh" #include "EventTags.hh" +//char* format_time(const time_t time) +//{ +// stati char tform[20]; +// struct tm* t = gmtime(t); +// sprintf(tform,"%04d/%02d/%02d %02d:%02d:%02d", +// 1900+t->tm_year,1+t->tm_mon,t->tm_mday, +// t->tm_hour,t->tm_min,t->tm_sec); +// return tform; +//} + int main(int argc, char* argv[]) { @@ -29,6 +39,7 @@ int main(int argc, char* argv[]) // Get default parameters from configurator int runnr = cfg->RunNumber(); + int process_id = cfg->ProcessId(); unsigned int nevts = cfg->NEventsPerFile(); unsigned int verbose = cfg->Verbose(); std::string rawhead = cfg->RawFileHeader(); @@ -36,7 +47,7 @@ int main(int argc, char* argv[]) // Parse options int c; - while ((c = getopt (argc, argv, "n:r:i:o:v:h")) != -1) { + while ((c = getopt (argc, argv, "n:r:I:i:o:v:h")) != -1) { switch (c) { case 'r': @@ -52,9 +63,25 @@ int main(int argc, char* argv[]) fprintf (stderr, "Error while processing option '-r'. 
Run number set to %d (must be >=0).\n", runnr); exit(1); } - fprintf(stdout,"Merging files from run %d\n",runnr); + fprintf(stdout,"Filtering files from run %d\n",runnr); cfg->SetRunNumber(runnr); break; + case 'I': + if (process_id != cfg->ProcessId()) { + fprintf (stderr, "Error while processing option '-I'. Multiple ids specified.\n"); + exit(1); + } + if (sscanf(optarg,"%d",&process_id) != 1) { + fprintf (stderr, "Error while processing option '-I'. Wrong parameter '%s'.\n", optarg); + exit(1); + } + if (process_id<0) { + fprintf (stderr, "Error while processing option '-I'. Process id set to %d (must be >=0).\n", process_id); + exit(1); + } + fprintf(stdout,"Level1 process id is %d\n",process_id); + cfg->SetProcessId(process_id); + break; case 'n': if ( sscanf(optarg,"%u",&nevts) != 1 ) { fprintf (stderr, "Error while processing option '-n'. Wrong parameter '%s'.\n", optarg); @@ -82,10 +109,11 @@ int main(int argc, char* argv[]) cfg->SetVerbose(verbose); break; case 'h': - fprintf(stdout,"\nPadmeLevel1 -i input_stream [-o rawfile_head] [-r run_number] [-n events] [-v level] [-h]\n\n"); + fprintf(stdout,"\nPadmeLevel1 -i input_stream [-o rawfile_head] [-r run_number] [-I process_id] [-n events] [-v level] [-h]\n\n"); fprintf(stdout," -i: define input stream FIFO file\n"); fprintf(stdout," -o: define rawdata output files header. Includes path. 
(default: %s)\n",cfg->RawFileHeader().c_str()); fprintf(stdout," -r: define run number being processes (default: %d)\n",cfg->RunNumber()); + fprintf(stdout," -I: define DB id of this Level1 process assigned by RunControl (default: %d)\n",cfg->ProcessId()); fprintf(stdout," -n: define max number of events per output file (0=no limit, default: %u)\n",cfg->NEventsPerFile()); fprintf(stdout," -v: define verbose level (default: %u)\n",cfg->Verbose()); fprintf(stdout," -h: show this help message and exit\n\n"); @@ -95,7 +123,7 @@ int main(int argc, char* argv[]) // verbose with no argument: increas verbose level by 1 cfg->SetVerbose(cfg->Verbose()+1); break; - } else if (optopt == 'r' || optopt == 'i' || optopt == 'o' || optopt == 'n') + } else if (optopt == 'r' || optopt == 'I' || optopt == 'i' || optopt == 'o' || optopt == 'n') fprintf (stderr, "Option -%c requires an argument.\n", optopt); else if (isprint(optopt)) fprintf (stderr, "Unknown option `-%c'.\n", optopt); @@ -114,22 +142,22 @@ int main(int argc, char* argv[]) } // If this is an official run, connect to DB and get id of merger process - // N.B. merger id is needed to assign root files in DB - if (cfg->RunNumber()) { - - // Get handle to DB - DBService* db = DBService::GetInstance(); - - // Get id of merger for future DB accesses - int merger_id = 0; - rc = db->GetMergerId(merger_id,cfg->RunNumber()); - if (rc != DBSERVICE_OK) { - printf("ERROR retrieving from DB id of merger process for run %d. Aborting\n",cfg->RunNumber()); - exit(1); - } - cfg->SetMergerId(merger_id); - - } + //// N.B. merger id is needed to assign root files in DB + //if (cfg->RunNumber()) { + // + // // Get handle to DB + // DBService* db = DBService::GetInstance(); + // + // // Get id of merger for future DB accesses + // int merger_id = 0; + // rc = db->GetMergerId(merger_id,cfg->RunNumber()); + // if (rc != DBSERVICE_OK) { + // printf("ERROR retrieving from DB id of merger process for run %d. 
Aborting\n",cfg->RunNumber()); + // exit(1); + // } + // cfg->SetMergerId(merger_id); + // + //} // Connect to root services RootIO* root = new RootIO(); @@ -146,6 +174,11 @@ int main(int argc, char* argv[]) // We are now ready to process data: get start time time_t time_start; time(&time_start); + //printf("=== PadmeLevel1 starting on %s UTC ===\n",format_time(time_start)); + printf("=== PadmeLevel1 starting on %s UTC ===\n",cfg->FormatTime(time_start)); + + printf("DBINFO - process_set_status %d %d\n",cfg->ProcessId(),DB_STATUS_RUNNING); + printf("DBINFO - process_set_time_start %d %d\n",cfg->ProcessId(),cfg->FormatTime(time_start)); // Define counters for input stream size and number of events unsigned long int input_size = 0; @@ -567,34 +600,14 @@ int main(int argc, char* argv[]) printf("Events written: %u (%6.1f events/sec)\n",root->GetTotalEvents(),evtpsec); printf("Bytes written: %llu (%10.1f bytes/sec)\n",root->GetTotalSize(),bytepsec); - //// If input was from a real run, update DB - //if (cfg->RunNumber()) { - // - // // Get handle to DB - // DBService* db = DBService::GetInstance(); - // - // // Update merger status - // rc = db->SetMergerStatus(3,cfg->MergerId()); - // if (rc != DBSERVICE_OK) { - // printf("ERROR setting merger status in DB. Aborting\n"); - // exit(1); - // } - // - // // Update merger stop time - // rc = db->SetMergerTime("STOP",cfg->MergerId()); - // if (rc != DBSERVICE_OK) { - // printf("ERROR setting merger stop time in DB. Aborting\n"); - // exit(1); - // } - // - // // Update DB with final counters (files created, events written, data written) - // rc = db->UpdateMergerInfo(root->GetTotalFiles(),root->GetTotalEvents(),root->GetTotalSize(),cfg->MergerId()); - // if (rc != DBSERVICE_OK) { - // printf("ERROR updating DB with number of files (n=%u) number of events (n=%u) and output size (size=%llu) for merger id %d. 
Aborting\n",root->GetTotalFiles(),root->GetTotalEvents(),root->GetTotalSize(),cfg->MergerId()); - // exit(1); - // } - // - //} + printf("DBINFO - process_set_status %d %d\n",cfg->ProcessId(),DB_STATUS_FINISHED); + printf("DBINFO - process_set_time_stop %d %d\n",cfg->ProcessId(),cfg->FormatTime(time_stop)); + printf("DBINFO - process_set_n_files %d %d\n",cfg->ProcessId(),root->GetTotalFiles()); + printf("DBINFO - process_set_total_events %d %d\n",cfg->ProcessId(),root->GetTotalEvents()); + printf("DBINFO - process_set_total_size %d %ld\n",cfg->ProcessId(),root->GetTotalSize()); + + // Show exit time + printf("=== PadmeMerger exiting on %s UTC ===\n",cfg->FormatTime(time(0))); exit(0); diff --git a/Level1/PadmeMerger.cpp b/Level1/PadmeMerger.cpp index be389dde..5e82209e 100644 --- a/Level1/PadmeMerger.cpp +++ b/Level1/PadmeMerger.cpp @@ -28,27 +28,35 @@ #define F_GETPIPE_SZ 1032 #define PIPESIZE_MB 16 -void fmt_time(char buf[20],time_t* t) -{ - struct tm* tgm = gmtime(t); - sprintf(buf,"%04d-%02d-%02d %02d:%02d:%02d",1900+tgm->tm_year,tgm->tm_mon+1,tgm->tm_mday,tgm->tm_hour,tgm->tm_min,tgm->tm_sec); -} +//void fmt_time(char buf[20],time_t* t) +//{ +// struct tm* tgm = gmtime(t); +// sprintf(buf,"%04d-%02d-%02d %02d:%02d:%02d",1900+tgm->tm_year,tgm->tm_mon+1,tgm->tm_mday,tgm->tm_hour,tgm->tm_min,tgm->tm_sec); +//} + +//char* format_time(const time_t time) +//{ +// stati char tform[20]; +// struct tm* t = gmtime(t); +// sprintf(tform,"%04d/%02d/%02d %02d:%02d:%02d", +// 1900+t->tm_year,1+t->tm_mon,t->tm_mday, +// t->tm_hour,t->tm_min,t->tm_sec); +// return tform; +//} int main(int argc, char* argv[]) { - int rc; // DB library retrun code + //int rc; // DB library return code - time_t time_start, time_stop, time_first, time_last; - char t_fmt[20]; // Formatted time string "YYYY-MM-DD hh:mm:ss" + //time_t time_start, time_stop, time_first, time_last; + time_t time_start, time_first, time_last; + //char t_fmt[20]; // Formatted time string "YYYY-MM-DD hh:mm:ss" // Set 
standard output/error in unbuffered mode setbuf(stdout,NULL); setbuf(stderr,NULL); - time(&time_start); fmt_time(t_fmt,&time_start); - printf("=== PadmeMerger starting on %s UTC ===\n",t_fmt); - // Make sure we are on a sane machine (int: 32bits, long int: 64bits) if (sizeof(int) < 4 || sizeof(long int) < 8) { printf("*** ERROR *** On this machine int is %lu bytes and long int is %lu bytes. Aborting.\n",sizeof(int),sizeof(long int)); @@ -60,15 +68,14 @@ int main(int argc, char* argv[]) // Get default parameters from configurator int run_number = cfg->RunNumber(); + int process_id = cfg->ProcessId(); std::string input_stream_list = cfg->InputStreamList(); std::string output_stream_list = cfg->OutputStreamList(); - //std::string raw_file_header = cfg->RawFileHeader(); - //unsigned int n_events_per_file = cfg->NEventsPerFile(); unsigned int verbose = cfg->Verbose(); // Parse options int c; - while ((c = getopt (argc, argv, "r:i:o:v:h")) != -1) { + while ((c = getopt (argc, argv, "r:I:i:o:v:h")) != -1) { switch (c) { case 'r': @@ -87,6 +94,22 @@ int main(int argc, char* argv[]) fprintf(stdout,"Merging files from run %d\n",run_number); cfg->SetRunNumber(run_number); break; + case 'I': + if (process_id != cfg->ProcessId()) { + fprintf (stderr, "Error while processing option '-I'. Multiple ids specified.\n"); + exit(1); + } + if (sscanf(optarg,"%d",&process_id) != 1) { + fprintf (stderr, "Error while processing option '-I'. Wrong parameter '%s'.\n", optarg); + exit(1); + } + if (process_id<0) { + fprintf (stderr, "Error while processing option '-I'. 
Process id set to %d (must be >=0).\n", process_id); + exit(1); + } + fprintf(stdout,"Merger process id is %d\n",process_id); + cfg->SetProcessId(process_id); + break; case 'o': output_stream_list = optarg; fprintf(stdout,"Merged data will be written to streams listed in '%s'\n",output_stream_list.c_str()); @@ -106,8 +129,9 @@ int main(int argc, char* argv[]) cfg->SetVerbose(verbose); break; case 'h': - fprintf(stdout,"\nPadmeMerger [-r run_number] [-i input_stream_list] [-o output_stream_list] [-v verbose_level] [-h]\n\n"); + fprintf(stdout,"\nPadmeMerger [-r run_number] [-I process_id] [-i input_stream_list] [-o output_stream_list] [-v verbose_level] [-h]\n\n"); fprintf(stdout," -r: define run number being processed (default: %d)\n",cfg->RunNumber()); + fprintf(stdout," -I: define DB id of this Merger process assigned by RunControl (default: %d)\n",cfg->ProcessId()); fprintf(stdout," -i: define file with list of input streams (default: '%s')\n",cfg->InputStreamList().c_str()); fprintf(stdout," -o: define file with list of output streams (default: '%s')\n",cfg->OutputStreamList().c_str()); fprintf(stdout," -v: define verbose level (default: %u)\n",cfg->Verbose()); @@ -118,7 +142,7 @@ int main(int argc, char* argv[]) // verbose with no argument: increas verbose level by 1 cfg->SetVerbose(cfg->Verbose()+1); break; - } else if (optopt == 'r' || optopt == 'i' || optopt == 'o') + } else if (optopt == 'r' || optopt == 'I' || optopt == 'i' || optopt == 'o') fprintf (stderr, "Option -%c requires an argument.\n", optopt); else if (isprint(optopt)) fprintf (stderr, "Unknown option `-%c'.\n", optopt); @@ -136,22 +160,27 @@ int main(int argc, char* argv[]) exit(1); } - // If this is an official run, connect to DB and get id of merger - if (cfg->RunNumber()) { - - // Get handle to DB - DBService* db = DBService::GetInstance(); + time(&time_start); + //fmt_time(t_fmt,&time_start); + //printf("=== PadmeMerger starting on %s UTC ===\n",t_fmt); + printf("=== PadmeMerger starting 
on %s UTC ===\n",cfg->FormatTime(time_start)); - // Get id of merger for future DB accesses - int merger_id = 0; - rc = db->GetMergerId(merger_id,cfg->RunNumber()); - if (rc != DBSERVICE_OK) { - printf("ERROR retrieving from DB id of merger process for run %d. Aborting\n",cfg->RunNumber()); - exit(1); - } - cfg->SetMergerId(merger_id); - - } + // If this is an official run, connect to DB and get id of merger + //if (cfg->RunNumber()) { + // + // // Get handle to DB + // DBService* db = DBService::GetInstance(); + // + // // Get id of merger for future DB accesses + // int merger_id = 0; + // rc = db->GetMergerId(merger_id,cfg->RunNumber()); + // if (rc != DBSERVICE_OK) { + // printf("ERROR retrieving from DB id of merger process for run %d. Aborting\n",cfg->RunNumber()); + // exit(1); + // } + // cfg->SetMergerId(merger_id); + // + //} ADCBoard* board; std::vector boards; @@ -275,26 +304,28 @@ int main(int argc, char* argv[]) printf("- Using a total of %u output Level1 streams\n",NOutputStreams); // Everything is set: tell DB merger has started - if (cfg->RunNumber()) { - - // Get handle to DB - DBService* db = DBService::GetInstance(); - - // Update merger status - rc = db->SetMergerStatus(2,cfg->MergerId()); - if (rc != DBSERVICE_OK) { - printf("ERROR setting merger status in DB. Aborting\n"); - exit(1); - } - - // Update merger start time - rc = db->SetMergerTime("START",cfg->MergerId()); - if (rc != DBSERVICE_OK) { - printf("ERROR setting merger start time in DB. Aborting\n"); - exit(1); - } - - } + //if (cfg->RunNumber()) { + // + // // Get handle to DB + // DBService* db = DBService::GetInstance(); + // + // // Update merger status + // rc = db->SetMergerStatus(2,cfg->ProcessId()); + // if (rc != DBSERVICE_OK) { + // printf("ERROR setting merger status in DB. 
Aborting\n"); + // exit(1); + // } + // + // // Update merger start time + // rc = db->SetMergerTime("START",cfg->ProcessId()); + // if (rc != DBSERVICE_OK) { + // printf("ERROR setting merger start time in DB. Aborting\n"); + // exit(1); + // } + // + //} + printf("DBINFO - process_set_status %d %d\n",cfg->ProcessId(),DB_STATUS_RUNNING); + printf("DBINFO - process_set_time_start %d %s\n",cfg->ProcessId(),cfg->FormatTime(time_start)); unsigned int CurrentOutputStream = 0; // First event will be sent to first output stream @@ -724,38 +755,41 @@ int main(int argc, char* argv[]) } printf("Total Events %7u Data %11.1f MiB Rates %6.1f evt/s %7.3f MiB/s\n",NumberOfEvents,size_mib,event_rate,data_rate); - // If input was from a real run, update DB - if (cfg->RunNumber()) { - - // Get handle to DB - DBService* db = DBService::GetInstance(); - - // Update merger status - rc = db->SetMergerStatus(3,cfg->MergerId()); - if (rc != DBSERVICE_OK) { - printf("ERROR setting merger status in DB. Aborting\n"); - exit(1); - } - - // Update merger stop time - rc = db->SetMergerTime("STOP",cfg->MergerId()); - if (rc != DBSERVICE_OK) { - printf("ERROR setting merger stop time in DB. Aborting\n"); - exit(1); - } - - // Update DB with final counters (files created, events written, data written) - //rc = db->UpdateMergerInfo(root->GetTotalFiles(),root->GetTotalEvents(),root->GetTotalSize(),cfg->MergerId()); - //if (rc != DBSERVICE_OK) { - // printf("ERROR updating DB with number of files (n=%u) number of events (n=%u) and output size (size=%lu) for merger id %d. 
Aborting\n",root->GetTotalFiles(),root->GetTotalEvents(),root->GetTotalSize(),cfg->MergerId()); - // exit(1); - //} - - } + //// If input was from a real run, update DB + //if (cfg->RunNumber()) { + // + // // Get handle to DB + // DBService* db = DBService::GetInstance(); + // + // // Update merger status + // rc = db->SetMergerStatus(3,cfg->ProcessId()); + // if (rc != DBSERVICE_OK) { + // printf("ERROR setting merger status in DB. Aborting\n"); + // exit(1); + // } + // + // // Update merger stop time + // rc = db->SetMergerTime("STOP",cfg->ProcessId()); + // if (rc != DBSERVICE_OK) { + // printf("ERROR setting merger stop time in DB. Aborting\n"); + // exit(1); + // } + // + // // Update DB with final counters (files created, events written, data written) + // //rc = db->UpdateMergerInfo(root->GetTotalFiles(),root->GetTotalEvents(),root->GetTotalSize(),cfg->ProcessId()); + // //if (rc != DBSERVICE_OK) { + // // printf("ERROR updating DB with number of files (n=%u) number of events (n=%u) and output size (size=%lu) for merger id %d. 
Aborting\n",root->GetTotalFiles(),root->GetTotalEvents(),root->GetTotalSize(),cfg->ProcessId()); + // // exit(1); + // //} + // + //} + printf("DBINFO - process_set_status %d %d\n",cfg->ProcessId(),DB_STATUS_FINISHED); + printf("DBINFO - process_set_time_stop %d %s\n",cfg->ProcessId(),cfg->FormatTime(time_last)); + printf("DBINFO - process_set_total_events %d %d\n",cfg->ProcessId(),NumberOfEvents); + printf("DBINFO - process_set_total_size %d %llu\n",cfg->ProcessId(),total_output_size); // Show exit time - time(&time_stop); fmt_time(t_fmt,&time_stop); - printf("=== PadmeMerger exiting on %s UTC ===\n",t_fmt); + printf("=== PadmeMerger exiting on %s UTC ===\n",cfg->FormatTime(time(0))); exit(0); } diff --git a/Level1/include/Configuration.hh b/Level1/include/Configuration.hh index 82f405e3..58120275 100644 --- a/Level1/include/Configuration.hh +++ b/Level1/include/Configuration.hh @@ -39,8 +39,8 @@ public: void SetNEventsPerFile(unsigned int n) { fNEventsPerFile = n; } unsigned int NEventsPerFile() { return fNEventsPerFile; } - void SetMergerId(int i) { fMergerId = i; } - int MergerId() { return fMergerId; } + void SetProcessId(int i) { fProcessId = i; } + int ProcessId() { return fProcessId; } void SetVerbose(unsigned int v) { fVerbose = v; } unsigned int Verbose() { return fVerbose; } @@ -48,11 +48,13 @@ public: void SetDebugScale(unsigned int v) { fDebugScale = v; } unsigned int DebugScale() { return fDebugScale; } + char* FormatTime(const time_t); + private: int fRunNumber; - int fMergerId; + int fProcessId; std::string fInputStream; diff --git a/Level1/include/DBService.hh b/Level1/include/DBService.hh index 1e653949..6aa65cd7 100644 --- a/Level1/include/DBService.hh +++ b/Level1/include/DBService.hh @@ -5,11 +5,24 @@ #include #include +// Return codes for DB service calls #define DBSERVICE_OK 0 #define DBSERVICE_ERROR 1 #define DBSERVICE_SQLERROR 2 #define DBSERVICE_CONNECTERROR 3 +// Definition of process status values in DB +#define DB_STATUS_IDLE 0 +#define 
DB_STATUS_INITIALIZING 1 +#define DB_STATUS_INIT_FAIL 2 +#define DB_STATUS_INITIALIZED 3 +#define DB_STATUS_ABORTED 4 +#define DB_STATUS_RUNNING 5 +#define DB_STATUS_RUN_FAIL 6 +#define DB_STATUS_FINISHED 7 +#define DB_STATUS_CLOSE_FAIL 8 +#define DB_STATUS_UNKNOWN 9 + class DBService { diff --git a/Level1/include/RootIO.hh b/Level1/include/RootIO.hh index 809a4e9e..92fa0f35 100644 --- a/Level1/include/RootIO.hh +++ b/Level1/include/RootIO.hh @@ -62,7 +62,7 @@ private: Int_t SetOutFile(); Configuration* fConfig; - DBService* fDB; + //DBService* fDB; UInt_t fOutEventsTotal; ULong_t fOutSizeTotal; diff --git a/Level1/src/Configuration.cc b/Level1/src/Configuration.cc index e9e2fd84..31dbffad 100644 --- a/Level1/src/Configuration.cc +++ b/Level1/src/Configuration.cc @@ -6,7 +6,7 @@ Configuration::Configuration() { // Set default configuration parameters fRunNumber = 0; - fMergerId = -1; + fProcessId = -1; fInputStream = ""; fInputStreamList = ""; fOutputStreamList = ""; @@ -24,3 +24,13 @@ Configuration* Configuration::GetInstance() if ( fInstance == 0 ) { fInstance = new Configuration(); } return fInstance; } + +char* Configuration::FormatTime(const time_t tt) +{ + static char tform[20]; + struct tm* t = gmtime(&tt); + sprintf(tform,"%04d/%02d/%02d %02d:%02d:%02d", + 1900+t->tm_year,1+t->tm_mon,t->tm_mday, + t->tm_hour,t->tm_min,t->tm_sec); + return tform; +} diff --git a/Level1/src/DBService.cc b/Level1/src/DBService.cc index f4b6933d..9a8f2603 100644 --- a/Level1/src/DBService.cc +++ b/Level1/src/DBService.cc @@ -319,7 +319,7 @@ int DBService::SetMergerTime(std::string sel, int merger_id) } else { - printf("DBService::SetMergerTime ERROR - unknown time tpe '%s' requested for merger id %d\n", + printf("DBService::SetMergerTime ERROR - unknown time type '%s' requested for merger id %d\n", sel.c_str(),merger_id); return DBSERVICE_ERROR; diff --git a/Level1/src/RootIO.cc b/Level1/src/RootIO.cc index a0d3940b..5c5fcb9b 100644 --- a/Level1/src/RootIO.cc +++ 
b/Level1/src/RootIO.cc @@ -9,7 +9,6 @@ RootIO::RootIO() { // Create TFile handle - //fTFileHandle = new TFile(); fTFileHandle = 0; // Create TRawEvent object @@ -19,8 +18,8 @@ RootIO::RootIO() fConfig = Configuration::GetInstance(); // Connect to DB service - fDB = 0; - if (fConfig->RunNumber()) fDB = DBService::GetInstance(); + //fDB = 0; + //if (fConfig->RunNumber()) fDB = DBService::GetInstance(); } @@ -34,7 +33,7 @@ RootIO::~RootIO() int RootIO::Init() { - fOutFileDBId = 0; + //fOutFileDBId = 0; fOutFileIndex = 0; fOutFileEvents = 0; fOutEventsTotal = 0; @@ -103,13 +102,15 @@ Int_t RootIO::OpenOutFile() fOutFileEvents = 0; // Register file in DB - if (fDB) { - int rc = fDB->OpenRawFile(fOutFileDBId,fConfig->MergerId(),fOutFile.Data(),fOutFileIndex); - if (rc != DBSERVICE_OK) { - printf("RootIO::OpenOutFile - ERROR while updating DB\n"); - return ROOTIO_ERROR; - } - } + //if (fDB) { + // int rc = fDB->OpenRawFile(fOutFileDBId,fConfig->MergerId(),fOutFile.Data(),fOutFileIndex); + // if (rc != DBSERVICE_OK) { + // printf("RootIO::OpenOutFile - ERROR while updating DB\n"); + // return ROOTIO_ERROR; + // } + //} + printf("DBINFO - file_create %s %s %d %d %d\n",fOutFile.Data(),"RAWDATA",3,fConfig->ProcessId(),fOutFileIndex); + printf("DBINFO - file_set_time_open %s %s\n",fOutFile.Data(),fConfig->FormatTime(time(0))); return ROOTIO_OK; @@ -135,13 +136,16 @@ Int_t RootIO::CloseOutFile() delete fTFileHandle; // This takes also care of deleting the TTree // Update DB with file close information - if (fDB) { - int rc = fDB->CloseRawFile(fOutFileDBId,fOutFileEvents,fOutFileSize); - if (rc != DBSERVICE_OK) { - printf("RootIO::CloseOutFile - ERROR while updating DB\n"); - return ROOTIO_ERROR; - } - } + //if (fDB) { + // int rc = fDB->CloseRawFile(fOutFileDBId,fOutFileEvents,fOutFileSize); + // if (rc != DBSERVICE_OK) { + // printf("RootIO::CloseOutFile - ERROR while updating DB\n"); + // return ROOTIO_ERROR; + // } + //} + printf("DBINFO - file_set_time_close %s 
%s\n",fOutFile.Data(),fConfig->FormatTime(time(0))); + printf("DBINFO - file_set_n_events %s %u\n",fOutFile.Data(),fOutFileEvents); + printf("DBINFO - file_set_size %s %lu\n",fOutFile.Data(),fOutFileSize); return ROOTIO_OK; } From e65768b1fab9fcbefb2ac7d7907a0884ecdc2a0f Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Thu, 28 Nov 2019 12:15:52 +0100 Subject: [PATCH 29/64] RunControl: improvedhandling of config parameters --- RunControl/code/ADCBoard.py | 512 +++++++++++++++++++++++------------- RunControl/code/Level1.py | 103 +++++--- RunControl/code/Merger.py | 93 ++++--- RunControl/code/Run.py | 307 +++++++++++++-------- RunControl/code/Trigger.py | 361 +++++++++++++++---------- 5 files changed, 879 insertions(+), 497 deletions(-) diff --git a/RunControl/code/ADCBoard.py b/RunControl/code/ADCBoard.py index f5e0e636..db0de3b8 100644 --- a/RunControl/code/ADCBoard.py +++ b/RunControl/code/ADCBoard.py @@ -183,123 +183,253 @@ def read_setup(self,setup): print l f.close() - def format_config_daq(self): - - cfgstring = "" - cfgstring += "daq_dir\t\t\t%s\n"%self.daq_dir - cfgstring += "ssh_id_file\t\t%s\n"%self.ssh_id_file - cfgstring += "executable\t\t%s\n"%self.executable - cfgstring += "start_file\t\t%s\n"%self.start_file - cfgstring += "quit_file\t\t%s\n"%self.quit_file - - cfgstring += "run_number\t\t%d\n"%self.run_number - cfgstring += "board_id\t\t%d\n"%self.board_id - cfgstring += "process_mode\t\t%s\n"%self.process_mode - if (self.run_number): cfgstring += "process_id\t\t%d\n"%self.proc_daq_id - - cfgstring += "node_id\t\t\t%d\n"%self.node_id - cfgstring += "node_ip\t\t\t%s\n"%self.node_ip - cfgstring += "conet2_link\t\t%d\n"%self.conet2_link - cfgstring += "conet2_slot\t\t%d\n"%self.conet2_slot - - cfgstring += "config_file\t\t%s\n"%self.config_file_daq - cfgstring += "log_file\t\t%s\n"%self.log_file_daq - cfgstring += "lock_file\t\t%s\n"%self.lock_file_daq - cfgstring += "initok_file\t\t%s\n"%self.initok_file_daq - cfgstring += 
"initfail_file\t\t%s\n"%self.initfail_file_daq - - cfgstring += "output_mode\t\t\t%s\n"%self.output_mode_daq + def config_list_daq(self): + + cfg_list = [] + + cfg_list.append(["daq_dir", self.daq_dir]) + cfg_list.append(["ssh_id_file", self.ssh_id_file]) + cfg_list.append(["executable", self.executable]) + cfg_list.append(["start_file", self.start_file]) + cfg_list.append(["quit_file", self.quit_file]) + + cfg_list.append(["run_number", str(self.run_number)]) + cfg_list.append(["board_id", str(self.board_id)]) + cfg_list.append(["process_mode", self.process_mode]) + if (self.run_number): + cfg_list.append(["process_id", str(self.proc_daq_id)]) + + cfg_list.append(["node_id", str(self.node_id)]) + cfg_list.append(["node_ip", self.node_ip]) + cfg_list.append(["conet2_link", str(self.conet2_link)]) + cfg_list.append(["conet2_slot", str(self.conet2_slot)]) + + cfg_list.append(["config_file", self.config_file_daq]) + cfg_list.append(["log_file", self.log_file_daq]) + cfg_list.append(["lock_file", self.lock_file_daq]) + cfg_list.append(["initok_file", self.initok_file_daq]) + cfg_list.append(["initfail_file", self.initfail_file_daq]) + + cfg_list.append(["output_mode", self.output_mode_daq]) if self.output_mode_daq == "STREAM": - cfgstring += "output_stream\t\t\t%s\n"%self.output_stream_daq + cfg_list.append(["output_stream", self.output_stream_daq]) elif self.output_mode_daq == "FILE": - cfgstring += "data_dir\t\t\t%s\n"%self.data_dir_daq - cfgstring += "data_file\t\t\t%s\n"%self.data_file_daq - cfgstring += "file_max_duration\t\t%d\n"%self.file_max_duration - cfgstring += "file_max_size\t\t\t%d\n"%self.file_max_size - cfgstring += "file_max_events\t\t%d\n"%self.file_max_events + cfg_list.append(["data_dir", self.data_dir_daq]) + cfg_list.append(["data_file", self.data_file_daq]) + cfg_list.append(["file_max_duration",str(self.file_max_duration)]) + cfg_list.append(["file_max_size", str(self.file_max_size)]) + cfg_list.append(["file_max_events", 
str(self.file_max_events)]) - cfgstring += "total_daq_time\t\t%d\n"%self.total_daq_time + cfg_list.append(["total_daq_time", str(self.total_daq_time)]) - cfgstring += "startdaq_mode\t\t%d\n"%self.startdaq_mode - cfgstring += "trigger_mode\t\t%d\n"%self.trigger_mode - cfgstring += "trigger_iolevel\t\t%s\n"%self.trigger_iolevel + cfg_list.append(["startdaq_mode", str(self.startdaq_mode)]) + cfg_list.append(["trigger_mode", str(self.trigger_mode)]) + cfg_list.append(["trigger_iolevel", self.trigger_iolevel]) - cfgstring += "group_enable_mask\t%#1x\n"%self.group_enable_mask - cfgstring += "channel_enable_mask\t%#08x\n"%self.channel_enable_mask + cfg_list.append(["group_enable_mask", "%#1x"%self.group_enable_mask]) + cfg_list.append(["channel_enable_mask", "%#08x"%self.channel_enable_mask]) - cfgstring += "offset_global\t\t%#04x\n"%self.offset_global + cfg_list.append(["offset_global", "%#04x"%self.offset_global]) for ch in range(32): if (self.offset_ch[ch] != self.offset_global): - cfgstring += "offset_ch\t%d\t%#04x\n"%(ch,self.offset_ch[ch]) + cfg_list.append(["offset_ch", "%2d %#04x"%(ch,self.offset_ch[ch])]) - cfgstring += "post_trigger_size\t%d\n"%self.post_trigger_size - cfgstring += "max_num_events_blt\t%d\n"%self.max_num_events_blt + cfg_list.append(["post_trigger_size", str(self.post_trigger_size)]) + cfg_list.append(["max_num_events_blt", str(self.max_num_events_blt)]) - cfgstring += "drs4corr_enable\t\t%d\n"%self.drs4corr_enable - cfgstring += "drs4_sampfreq\t\t%d\n"%self.drs4_sampfreq + cfg_list.append(["drs4corr_enable", str(self.drs4corr_enable)]) + cfg_list.append(["drs4_sampfreq", str(self.drs4_sampfreq)]) - cfgstring += "auto_threshold\t\t%#04x\n"%self.auto_threshold - cfgstring += "auto_duration\t\t%d\n"%self.auto_duration + cfg_list.append(["auto_threshold", "%#04x"%self.auto_threshold]) + cfg_list.append(["auto_duration", str(self.auto_duration)]) - cfgstring += "daq_loop_delay\t\t%d\n"%self.daq_loop_delay + cfg_list.append(["daq_loop_delay", 
str(self.daq_loop_delay)]) - cfgstring += "debug_scale\t\t\t%d\n"%self.debug_scale + cfg_list.append(["debug_scale", str(self.debug_scale)]) - return cfgstring + return cfg_list - def format_config_zsup(self): + def config_list_zsup(self): - cfgstring = "" - cfgstring += "daq_dir\t\t\t%s\n"%self.daq_dir - cfgstring += "ssh_id_file\t\t%s\n"%self.ssh_id_file - cfgstring += "executable\t\t%s\n"%self.executable + cfg_list = [] - cfgstring += "run_number\t\t%d\n"%self.run_number - cfgstring += "board_id\t\t%d\n"%self.board_id - cfgstring += "process_mode\t\tZSUP\n" - if (self.run_number): cfgstring += "process_id\t\t%d\n"%self.proc_zsup_id + cfg_list.append(["daq_dir", self.daq_dir]) + cfg_list.append(["ssh_id_file", self.ssh_id_file]) + cfg_list.append(["executable", self.executable]) - cfgstring += "node_id\t\t\t%d\n"%self.node_id - cfgstring += "node_ip\t\t\t%s\n"%self.node_ip - #cfgstring += "conet2_link\t\t%d\n"%self.conet2_link - #cfgstring += "conet2_slot\t\t%d\n"%self.conet2_slot + cfg_list.append(["run_number", str(self.run_number)]) + cfg_list.append(["board_id", str(self.board_id)]) + cfg_list.append(["process_mode", "ZSUP"]) + if (self.run_number): + cfg_list.append(["process_id", str(self.proc_zsup_id)]) + + cfg_list.append(["node_id", str(self.node_id)]) + cfg_list.append(["node_ip", self.node_ip]) - cfgstring += "config_file\t\t%s\n"%self.config_file_zsup - cfgstring += "log_file\t\t%s\n"%self.log_file_zsup - cfgstring += "lock_file\t\t%s\n"%self.lock_file_zsup - cfgstring += "initok_file\t\t%s\n"%self.initok_file_zsup - cfgstring += "initfail_file\t\t%s\n"%self.initfail_file_zsup + cfg_list.append(["config_file", self.config_file_zsup]) + cfg_list.append(["log_file", self.log_file_zsup]) + cfg_list.append(["lock_file", self.lock_file_zsup]) + cfg_list.append(["initok_file", self.initok_file_zsup]) + cfg_list.append(["initfail_file", self.initfail_file_zsup]) - cfgstring += "output_mode\t\t\t%s\n"%self.output_mode_zsup + cfg_list.append(["output_mode", 
self.output_mode_zsup]) if self.output_mode_zsup == "STREAM": - cfgstring += "output_stream\t\t\t%s\n"%self.output_stream_zsup + cfg_list.append(["output_stream", self.output_stream_zsup]) elif self.output_mode_zsup == "FILE": - cfgstring += "data_dir\t\t\t%s\n"%self.data_dir_zsup - cfgstring += "data_file\t\t\t%s\n"%self.data_file_zsup - cfgstring += "file_max_duration\t\t%d\n"%self.file_max_duration - cfgstring += "file_max_size\t\t\t%d\n"%self.file_max_size - cfgstring += "file_max_events\t\t%d\n"%self.file_max_events + cfg_list.append(["data_dir", self.data_dir_zsup]) + cfg_list.append(["data_file", self.data_file_zsup]) + cfg_list.append(["file_max_duration", str(self.file_max_duration)]) + cfg_list.append(["file_max_size", str(self.file_max_size)]) + cfg_list.append(["file_max_events", str(self.file_max_events)]) + + cfg_list.append(["input_stream", self.input_stream_zsup]) + + cfg_list.append(["zero_suppression", str(self.zero_suppression)]) + if (self.zero_suppression%100 == 1): - cfgstring += "input_stream\t\t%s\n"%self.input_stream_zsup + cfg_list.append(["zs1_head", str(self.zs1_head)]) + cfg_list.append(["zs1_tail", str(self.zs1_tail)]) + cfg_list.append(["zs1_nsigma", str(self.zs1_nsigma)]) + cfg_list.append(["zs1_nabovethr", str(self.zs1_nabovethr)]) + cfg_list.append(["zs1_badrmsthr", str(self.zs1_badrmsthr)]) - cfgstring += "zero_suppression\t%d\n"%self.zero_suppression - if (self.zero_suppression%100 == 1): - cfgstring += "zs1_head\t\t%d\n"%self.zs1_head - cfgstring += "zs1_tail\t\t%d\n"%self.zs1_tail - cfgstring += "zs1_nsigma\t\t%f\n"%self.zs1_nsigma - cfgstring += "zs1_nabovethr\t\t%d\n"%self.zs1_nabovethr - cfgstring += "zs1_badrmsthr\t\t%f\n"%self.zs1_badrmsthr elif (self.zero_suppression%100 == 2): - cfgstring += "zs2_tail\t\t%d\n"%self.zs2_tail - cfgstring += "zs2_minrms\t\t%f\n"%self.zs2_minrms + + cfg_list.append(["zs2_tail", str(self.zs2_tail)]) + cfg_list.append(["zs2_minrms", str(self.zs2_minrms)]) for ch in range(32): if 
(self.zs2_minrms_ch[ch] != self.zs2_minrms): - cfgstring += "zs2_minrms_ch\t%d\t%f\n"%(ch,self.zs2_minrms_ch[ch]) + cfg_list.append(["zs2_minrms_ch","%2d %f"%(ch,self.zs2_minrms_ch[ch])]) - cfgstring += "debug_scale\t\t\t%d\n"%self.debug_scale + cfg_list.append(["debug_scale", str(self.debug_scale)]) + return cfg_list + + def format_config_daq(self): + + cfgstring = "" + for cfg in self.config_list_daq(): cfgstring += "%-30s %s\n"%cfg + return cfgstring + + #cfgstring = "" + #cfgstring += "daq_dir\t\t\t%s\n"%self.daq_dir + #cfgstring += "ssh_id_file\t\t%s\n"%self.ssh_id_file + #cfgstring += "executable\t\t%s\n"%self.executable + #cfgstring += "start_file\t\t%s\n"%self.start_file + #cfgstring += "quit_file\t\t%s\n"%self.quit_file + # + #cfgstring += "run_number\t\t%d\n"%self.run_number + #cfgstring += "board_id\t\t%d\n"%self.board_id + #cfgstring += "process_mode\t\t%s\n"%self.process_mode + #if (self.run_number): cfgstring += "process_id\t\t%d\n"%self.proc_daq_id + # + #cfgstring += "node_id\t\t\t%d\n"%self.node_id + #cfgstring += "node_ip\t\t\t%s\n"%self.node_ip + #cfgstring += "conet2_link\t\t%d\n"%self.conet2_link + #cfgstring += "conet2_slot\t\t%d\n"%self.conet2_slot + # + #cfgstring += "config_file\t\t%s\n"%self.config_file_daq + #cfgstring += "log_file\t\t%s\n"%self.log_file_daq + #cfgstring += "lock_file\t\t%s\n"%self.lock_file_daq + #cfgstring += "initok_file\t\t%s\n"%self.initok_file_daq + #cfgstring += "initfail_file\t\t%s\n"%self.initfail_file_daq + # + #cfgstring += "output_mode\t\t\t%s\n"%self.output_mode_daq + #if self.output_mode_daq == "STREAM": + # cfgstring += "output_stream\t\t\t%s\n"%self.output_stream_daq + #elif self.output_mode_daq == "FILE": + # cfgstring += "data_dir\t\t\t%s\n"%self.data_dir_daq + # cfgstring += "data_file\t\t\t%s\n"%self.data_file_daq + # cfgstring += "file_max_duration\t\t%d\n"%self.file_max_duration + # cfgstring += "file_max_size\t\t\t%d\n"%self.file_max_size + # cfgstring += 
"file_max_events\t\t%d\n"%self.file_max_events + # + #cfgstring += "total_daq_time\t\t%d\n"%self.total_daq_time + # + #cfgstring += "startdaq_mode\t\t%d\n"%self.startdaq_mode + #cfgstring += "trigger_mode\t\t%d\n"%self.trigger_mode + #cfgstring += "trigger_iolevel\t\t%s\n"%self.trigger_iolevel + # + #cfgstring += "group_enable_mask\t%#1x\n"%self.group_enable_mask + #cfgstring += "channel_enable_mask\t%#08x\n"%self.channel_enable_mask + # + #cfgstring += "offset_global\t\t%#04x\n"%self.offset_global + #for ch in range(32): + # if (self.offset_ch[ch] != self.offset_global): + # cfgstring += "offset_ch\t%d\t%#04x\n"%(ch,self.offset_ch[ch]) + # + #cfgstring += "post_trigger_size\t%d\n"%self.post_trigger_size + #cfgstring += "max_num_events_blt\t%d\n"%self.max_num_events_blt + # + #cfgstring += "drs4corr_enable\t\t%d\n"%self.drs4corr_enable + #cfgstring += "drs4_sampfreq\t\t%d\n"%self.drs4_sampfreq + # + #cfgstring += "auto_threshold\t\t%#04x\n"%self.auto_threshold + #cfgstring += "auto_duration\t\t%d\n"%self.auto_duration + # + #cfgstring += "daq_loop_delay\t\t%d\n"%self.daq_loop_delay + # + #cfgstring += "debug_scale\t\t\t%d\n"%self.debug_scale + # + #return cfgstring + + def format_config_zsup(self): + + cfgstring = "" + for cfg in self.config_list_zsup(): cfgstring += "%-30s %s\n"%cfg return cfgstring + #cfgstring = "" + #cfgstring += "daq_dir\t\t\t%s\n"%self.daq_dir + #cfgstring += "ssh_id_file\t\t%s\n"%self.ssh_id_file + #cfgstring += "executable\t\t%s\n"%self.executable + # + #cfgstring += "run_number\t\t%d\n"%self.run_number + #cfgstring += "board_id\t\t%d\n"%self.board_id + #cfgstring += "process_mode\t\tZSUP\n" + #if (self.run_number): cfgstring += "process_id\t\t%d\n"%self.proc_zsup_id + # + #cfgstring += "node_id\t\t\t%d\n"%self.node_id + #cfgstring += "node_ip\t\t\t%s\n"%self.node_ip + ##cfgstring += "conet2_link\t\t%d\n"%self.conet2_link + ##cfgstring += "conet2_slot\t\t%d\n"%self.conet2_slot + # + #cfgstring += "config_file\t\t%s\n"%self.config_file_zsup 
+ #cfgstring += "log_file\t\t%s\n"%self.log_file_zsup + #cfgstring += "lock_file\t\t%s\n"%self.lock_file_zsup + #cfgstring += "initok_file\t\t%s\n"%self.initok_file_zsup + #cfgstring += "initfail_file\t\t%s\n"%self.initfail_file_zsup + # + #cfgstring += "output_mode\t\t\t%s\n"%self.output_mode_zsup + #if self.output_mode_zsup == "STREAM": + # cfgstring += "output_stream\t\t\t%s\n"%self.output_stream_zsup + #elif self.output_mode_zsup == "FILE": + # cfgstring += "data_dir\t\t\t%s\n"%self.data_dir_zsup + # cfgstring += "data_file\t\t\t%s\n"%self.data_file_zsup + # cfgstring += "file_max_duration\t\t%d\n"%self.file_max_duration + # cfgstring += "file_max_size\t\t\t%d\n"%self.file_max_size + # cfgstring += "file_max_events\t\t%d\n"%self.file_max_events + # + #cfgstring += "input_stream\t\t%s\n"%self.input_stream_zsup + # + #cfgstring += "zero_suppression\t%d\n"%self.zero_suppression + #if (self.zero_suppression%100 == 1): + # cfgstring += "zs1_head\t\t%d\n"%self.zs1_head + # cfgstring += "zs1_tail\t\t%d\n"%self.zs1_tail + # cfgstring += "zs1_nsigma\t\t%f\n"%self.zs1_nsigma + # cfgstring += "zs1_nabovethr\t\t%d\n"%self.zs1_nabovethr + # cfgstring += "zs1_badrmsthr\t\t%f\n"%self.zs1_badrmsthr + #elif (self.zero_suppression%100 == 2): + # cfgstring += "zs2_tail\t\t%d\n"%self.zs2_tail + # cfgstring += "zs2_minrms\t\t%f\n"%self.zs2_minrms + # for ch in range(32): + # if (self.zs2_minrms_ch[ch] != self.zs2_minrms): + # cfgstring += "zs2_minrms_ch\t%d\t%f\n"%(ch,self.zs2_minrms_ch[ch]) + # + #cfgstring += "debug_scale\t\t\t%d\n"%self.debug_scale + # + #return cfgstring + def write_config(self): if self.config_file_daq == "unset": @@ -334,63 +464,67 @@ def create_proc_daq(self): # Add info about optical link self.db.add_daq_process_optical_link(self.proc_daq_id,self.node_id,self.conet2_link,self.conet2_slot) - self.db.add_cfg_para_proc(self.proc_daq_id,"daq_dir", self.daq_dir) - self.db.add_cfg_para_proc(self.proc_daq_id,"ssh_id_file", self.ssh_id_file) - 
self.db.add_cfg_para_proc(self.proc_daq_id,"executable", self.executable) - self.db.add_cfg_para_proc(self.proc_daq_id,"start_file", self.start_file) - self.db.add_cfg_para_proc(self.proc_daq_id,"quit_file", self.quit_file) - - #self.db.add_cfg_para_proc(self.proc_daq_id,"run_number", repr(self.run_number)) - self.db.add_cfg_para_proc(self.proc_daq_id,"board_id", repr(self.board_id)) - self.db.add_cfg_para_proc(self.proc_daq_id,"process_mode", self.process_mode) - - #self.db.add_cfg_para_proc(self.proc_daq_id,"node_id", repr(self.node_id)) - self.db.add_cfg_para_proc(self.proc_daq_id,"node_ip", self.node_ip) - self.db.add_cfg_para_proc(self.proc_daq_id,"conet2_link", repr(self.conet2_link)) - self.db.add_cfg_para_proc(self.proc_daq_id,"conet2_slot", repr(self.conet2_slot)) - - self.db.add_cfg_para_proc(self.proc_daq_id,"config_file", self.config_file_daq) - self.db.add_cfg_para_proc(self.proc_daq_id,"log_file", self.log_file_daq) - self.db.add_cfg_para_proc(self.proc_daq_id,"lock_file", self.lock_file_daq) - self.db.add_cfg_para_proc(self.proc_daq_id,"initok_file", self.initok_file_daq) - self.db.add_cfg_para_proc(self.proc_daq_id,"initfail_file", self.initfail_file_daq) - - self.db.add_cfg_para_proc(self.proc_daq_id,"output_mode", self.output_mode_daq) - if self.output_mode_daq == "STREAM": - self.db.add_cfg_para_proc(self.proc_daq_id,"output_stream", self.output_stream_daq) - elif self.output_mode_daq == "FILE": - self.db.add_cfg_para_proc(self.proc_daq_id,"data_dir", self.data_dir_daq) - self.db.add_cfg_para_proc(self.proc_daq_id,"data_file", self.data_file_daq) - self.db.add_cfg_para_proc(self.proc_daq_id,"file_max_duration", self.file_max_duration) - self.db.add_cfg_para_proc(self.proc_daq_id,"file_max_size", self.file_max_size) - self.db.add_cfg_para_proc(self.proc_daq_id,"file_max_events", self.file_max_events) - - self.db.add_cfg_para_proc(self.proc_daq_id,"total_daq_time", repr(self.total_daq_time)) - - 
self.db.add_cfg_para_proc(self.proc_daq_id,"startdaq_mode", repr(self.startdaq_mode)) - self.db.add_cfg_para_proc(self.proc_daq_id,"trigger_mode", repr(self.trigger_mode)) - self.db.add_cfg_para_proc(self.proc_daq_id,"trigger_iolevel", self.trigger_iolevel) - - self.db.add_cfg_para_proc(self.proc_daq_id,"group_enable_mask", "%#1x"%self.group_enable_mask) - self.db.add_cfg_para_proc(self.proc_daq_id,"channel_enable_mask","%#08x"%self.channel_enable_mask) - - self.db.add_cfg_para_proc(self.proc_daq_id,"offset_global", "%#04x"%self.proc_daq_id) - for ch in range(32): - if (self.offset_ch[ch] != self.offset_global): - self.db.add_cfg_para_proc(self.proc_daq_id,"offset_ch", "%d %#04x"%(ch,self.offset_ch[ch])) - - self.db.add_cfg_para_proc(self.proc_daq_id,"post_trigger_size", repr(self.post_trigger_size)) - self.db.add_cfg_para_proc(self.proc_daq_id,"max_num_events_blt", repr(self.max_num_events_blt)) - - self.db.add_cfg_para_proc(self.proc_daq_id,"drs4corr_enable", repr(self.drs4corr_enable)) - self.db.add_cfg_para_proc(self.proc_daq_id,"drs4_sampfreq", repr(self.drs4_sampfreq)) - - self.db.add_cfg_para_proc(self.proc_daq_id,"auto_threshold", "%#04x"%self.auto_threshold) - self.db.add_cfg_para_proc(self.proc_daq_id,"auto_duration", repr(self.auto_duration)) - - self.db.add_cfg_para_proc(self.proc_daq_id,"daq_loop_delay", repr(self.daq_loop_delay)) - - self.db.add_cfg_para_proc(self.proc_daq_id,"debug_scale", repr(self.debug_scale)) + # Add all configuration parameters + for cfg in self.config_list_daq(): + self.db.add_cfg_para_proc(self.proc_daq_id,cfg[0],cfg[1]) + + #self.db.add_cfg_para_proc(self.proc_daq_id,"daq_dir", self.daq_dir) + #self.db.add_cfg_para_proc(self.proc_daq_id,"ssh_id_file", self.ssh_id_file) + #self.db.add_cfg_para_proc(self.proc_daq_id,"executable", self.executable) + #self.db.add_cfg_para_proc(self.proc_daq_id,"start_file", self.start_file) + #self.db.add_cfg_para_proc(self.proc_daq_id,"quit_file", self.quit_file) + # + 
##self.db.add_cfg_para_proc(self.proc_daq_id,"run_number", repr(self.run_number)) + #self.db.add_cfg_para_proc(self.proc_daq_id,"board_id", repr(self.board_id)) + #self.db.add_cfg_para_proc(self.proc_daq_id,"process_mode", self.process_mode) + # + ##self.db.add_cfg_para_proc(self.proc_daq_id,"node_id", repr(self.node_id)) + #self.db.add_cfg_para_proc(self.proc_daq_id,"node_ip", self.node_ip) + #self.db.add_cfg_para_proc(self.proc_daq_id,"conet2_link", repr(self.conet2_link)) + #self.db.add_cfg_para_proc(self.proc_daq_id,"conet2_slot", repr(self.conet2_slot)) + # + #self.db.add_cfg_para_proc(self.proc_daq_id,"config_file", self.config_file_daq) + #self.db.add_cfg_para_proc(self.proc_daq_id,"log_file", self.log_file_daq) + #self.db.add_cfg_para_proc(self.proc_daq_id,"lock_file", self.lock_file_daq) + #self.db.add_cfg_para_proc(self.proc_daq_id,"initok_file", self.initok_file_daq) + #self.db.add_cfg_para_proc(self.proc_daq_id,"initfail_file", self.initfail_file_daq) + # + #self.db.add_cfg_para_proc(self.proc_daq_id,"output_mode", self.output_mode_daq) + #if self.output_mode_daq == "STREAM": + # self.db.add_cfg_para_proc(self.proc_daq_id,"output_stream", self.output_stream_daq) + #elif self.output_mode_daq == "FILE": + # self.db.add_cfg_para_proc(self.proc_daq_id,"data_dir", self.data_dir_daq) + # self.db.add_cfg_para_proc(self.proc_daq_id,"data_file", self.data_file_daq) + # self.db.add_cfg_para_proc(self.proc_daq_id,"file_max_duration", self.file_max_duration) + # self.db.add_cfg_para_proc(self.proc_daq_id,"file_max_size", self.file_max_size) + # self.db.add_cfg_para_proc(self.proc_daq_id,"file_max_events", self.file_max_events) + # + #self.db.add_cfg_para_proc(self.proc_daq_id,"total_daq_time", repr(self.total_daq_time)) + # + #self.db.add_cfg_para_proc(self.proc_daq_id,"startdaq_mode", repr(self.startdaq_mode)) + #self.db.add_cfg_para_proc(self.proc_daq_id,"trigger_mode", repr(self.trigger_mode)) + #self.db.add_cfg_para_proc(self.proc_daq_id,"trigger_iolevel", 
self.trigger_iolevel) + # + #self.db.add_cfg_para_proc(self.proc_daq_id,"group_enable_mask", "%#1x"%self.group_enable_mask) + #self.db.add_cfg_para_proc(self.proc_daq_id,"channel_enable_mask","%#08x"%self.channel_enable_mask) + # + #self.db.add_cfg_para_proc(self.proc_daq_id,"offset_global", "%#04x"%self.proc_daq_id) + #for ch in range(32): + # if (self.offset_ch[ch] != self.offset_global): + # self.db.add_cfg_para_proc(self.proc_daq_id,"offset_ch", "%d %#04x"%(ch,self.offset_ch[ch])) + # + #self.db.add_cfg_para_proc(self.proc_daq_id,"post_trigger_size", repr(self.post_trigger_size)) + #self.db.add_cfg_para_proc(self.proc_daq_id,"max_num_events_blt", repr(self.max_num_events_blt)) + # + #self.db.add_cfg_para_proc(self.proc_daq_id,"drs4corr_enable", repr(self.drs4corr_enable)) + #self.db.add_cfg_para_proc(self.proc_daq_id,"drs4_sampfreq", repr(self.drs4_sampfreq)) + # + #self.db.add_cfg_para_proc(self.proc_daq_id,"auto_threshold", "%#04x"%self.auto_threshold) + #self.db.add_cfg_para_proc(self.proc_daq_id,"auto_duration", repr(self.auto_duration)) + # + #self.db.add_cfg_para_proc(self.proc_daq_id,"daq_loop_delay", repr(self.daq_loop_delay)) + # + #self.db.add_cfg_para_proc(self.proc_daq_id,"debug_scale", repr(self.debug_scale)) return "ok" @@ -402,50 +536,54 @@ def create_proc_zsup(self): print "ADCBoard::create_proc_zsup - ERROR: unable to create new ZSUP proces in DB" return "error" - self.db.add_cfg_para_proc(self.proc_zsup_id,"daq_dir", self.daq_dir) - self.db.add_cfg_para_proc(self.proc_zsup_id,"ssh_id_file", self.ssh_id_file) - self.db.add_cfg_para_proc(self.proc_zsup_id,"executable", self.executable) - - #self.db.add_cfg_para_proc(self.proc_zsup_id,"run_number", repr(self.run_number)) - self.db.add_cfg_para_proc(self.proc_zsup_id,"board_id", repr(self.board_id)) - self.db.add_cfg_para_proc(self.proc_zsup_id,"process_mode", "ZSUP") - - #self.db.add_cfg_para_proc(self.proc_zsup_id,"node_id", repr(self.node_id)) - 
self.db.add_cfg_para_proc(self.proc_zsup_id,"node_ip", self.node_ip) - - self.db.add_cfg_para_proc(self.proc_zsup_id,"config_file", self.config_file_zsup) - self.db.add_cfg_para_proc(self.proc_zsup_id,"log_file", self.log_file_zsup) - self.db.add_cfg_para_proc(self.proc_zsup_id,"lock_file", self.lock_file_zsup) - self.db.add_cfg_para_proc(self.proc_zsup_id,"initok_file", self.initok_file_zsup) - self.db.add_cfg_para_proc(self.proc_zsup_id,"initfail_file", self.initfail_file_zsup) - - self.db.add_cfg_para_proc(self.proc_zsup_id,"output_mode", self.output_mode_zsup) - if self.output_mode_zsup == "STREAM": - self.db.add_cfg_para_proc(self.proc_zsup_id,"output_stream", self.output_stream_zsup) - elif self.output_mode_zsup == "FILE": - self.db.add_cfg_para_proc(self.proc_zsup_id,"data_dir", self.data_dir_zsup) - self.db.add_cfg_para_proc(self.proc_zsup_id,"data_file", self.data_file_zsup) - self.db.add_cfg_para_proc(self.proc_zsup_id,"file_max_duration", self.file_max_duration) - self.db.add_cfg_para_proc(self.proc_zsup_id,"file_max_size", self.file_max_size) - self.db.add_cfg_para_proc(self.proc_zsup_id,"file_max_events", self.file_max_events) - - self.db.add_cfg_para_proc(self.proc_zsup_id,"input_stream", self.input_stream_zsup) - - self.db.add_cfg_para_proc(self.proc_zsup_id,"zero_suppression", repr(self.zero_suppression)) - if (self.zero_suppression%100 == 1): - self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_head", repr(self.zs1_head)) - self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_tail", repr(self.zs1_tail)) - self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_nsigma", repr(self.zs1_nsigma)) - self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_nabovethr", repr(self.zs1_nabovethr)) - self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_badrmsthr", repr(self.zs1_badrmsthr)) - elif (self.zero_suppression%100 == 2): - self.db.add_cfg_para_proc(self.proc_zsup_id,"zs2_tail", repr(self.zs2_tail)) - self.db.add_cfg_para_proc(self.proc_zsup_id,"zs2_minrms", 
repr(self.zs2_minrms)) - for ch in range(32): - if (self.zs2_minrms_ch[ch] != self.zs2_minrms): - self.db.add_cfg_para_proc(self.proc_zsup_id,"zs2_minrms_ch","%d %d"%(ch,self.zs2_minrms_ch[ch])) - - self.db.add_cfg_para_procself.proc_zsup_id,"debug_scale", repr(self.debug_scale)) + # Add all configuration parameters + for cfg in self.config_list_zsup(): + self.db.add_cfg_para_proc(self.proc_zsup_id,cfg[0],cfg[1]) + + #self.db.add_cfg_para_proc(self.proc_zsup_id,"daq_dir", self.daq_dir) + #self.db.add_cfg_para_proc(self.proc_zsup_id,"ssh_id_file", self.ssh_id_file) + #self.db.add_cfg_para_proc(self.proc_zsup_id,"executable", self.executable) + # + ##self.db.add_cfg_para_proc(self.proc_zsup_id,"run_number", repr(self.run_number)) + #self.db.add_cfg_para_proc(self.proc_zsup_id,"board_id", repr(self.board_id)) + #self.db.add_cfg_para_proc(self.proc_zsup_id,"process_mode", "ZSUP") + # + ##self.db.add_cfg_para_proc(self.proc_zsup_id,"node_id", repr(self.node_id)) + #self.db.add_cfg_para_proc(self.proc_zsup_id,"node_ip", self.node_ip) + # + #self.db.add_cfg_para_proc(self.proc_zsup_id,"config_file", self.config_file_zsup) + #self.db.add_cfg_para_proc(self.proc_zsup_id,"log_file", self.log_file_zsup) + #self.db.add_cfg_para_proc(self.proc_zsup_id,"lock_file", self.lock_file_zsup) + #self.db.add_cfg_para_proc(self.proc_zsup_id,"initok_file", self.initok_file_zsup) + #self.db.add_cfg_para_proc(self.proc_zsup_id,"initfail_file", self.initfail_file_zsup) + # + #self.db.add_cfg_para_proc(self.proc_zsup_id,"output_mode", self.output_mode_zsup) + #if self.output_mode_zsup == "STREAM": + # self.db.add_cfg_para_proc(self.proc_zsup_id,"output_stream", self.output_stream_zsup) + #elif self.output_mode_zsup == "FILE": + # self.db.add_cfg_para_proc(self.proc_zsup_id,"data_dir", self.data_dir_zsup) + # self.db.add_cfg_para_proc(self.proc_zsup_id,"data_file", self.data_file_zsup) + # self.db.add_cfg_para_proc(self.proc_zsup_id,"file_max_duration", self.file_max_duration) + # 
self.db.add_cfg_para_proc(self.proc_zsup_id,"file_max_size", self.file_max_size) + # self.db.add_cfg_para_proc(self.proc_zsup_id,"file_max_events", self.file_max_events) + # + #self.db.add_cfg_para_proc(self.proc_zsup_id,"input_stream", self.input_stream_zsup) + # + #self.db.add_cfg_para_proc(self.proc_zsup_id,"zero_suppression", repr(self.zero_suppression)) + #if (self.zero_suppression%100 == 1): + # self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_head", repr(self.zs1_head)) + # self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_tail", repr(self.zs1_tail)) + # self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_nsigma", repr(self.zs1_nsigma)) + # self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_nabovethr", repr(self.zs1_nabovethr)) + # self.db.add_cfg_para_proc(self.proc_zsup_id,"zs1_badrmsthr", repr(self.zs1_badrmsthr)) + #elif (self.zero_suppression%100 == 2): + # self.db.add_cfg_para_proc(self.proc_zsup_id,"zs2_tail", repr(self.zs2_tail)) + # self.db.add_cfg_para_proc(self.proc_zsup_id,"zs2_minrms", repr(self.zs2_minrms)) + # for ch in range(32): + # if (self.zs2_minrms_ch[ch] != self.zs2_minrms): + # self.db.add_cfg_para_proc(self.proc_zsup_id,"zs2_minrms_ch","%d %d"%(ch,self.zs2_minrms_ch[ch])) + # + #self.db.add_cfg_para_proc(self.proc_zsup_id,"debug_scale", repr(self.debug_scale)) return "ok" diff --git a/RunControl/code/Level1.py b/RunControl/code/Level1.py index a607c17d..792bf776 100644 --- a/RunControl/code/Level1.py +++ b/RunControl/code/Level1.py @@ -40,31 +40,62 @@ def set_default_config(self): self.output_dir = "unset" self.output_header = "unset" - def format_config(self): + def config_list(self): - cfgstring = "" - cfgstring += "daq_dir\t\t\t%s\n"%self.daq_dir - cfgstring += "ssh_id_file\t\t%s\n"%self.ssh_id_file - cfgstring += "executable\t\t%s\n"%self.executable + cfg_list = [] + + cfg_list.append(["daq_dir", self.daq_dir]) + cfg_list.append(["ssh_id_file", self.ssh_id_file]) + cfg_list.append(["executable", self.executable]) + + 
cfg_list.append(["run_number", str(self.run_number)]) + cfg_list.append(["level1_id", str(self.level1_id)]) + if (self.run_number): + cfg_list.append(["process_id", str(self.process_id)]) - cfgstring += "run_number\t\t%d\n"%self.run_number - cfgstring += "level1_id\t\t%d\n"%self.level1_id - if (self.run_number): cfgstring += "process_id\t\t%d\n"%self.process_id + cfg_list.append(["node_id", str(self.node_id)]) + cfg_list.append(["node_ip", self.node_ip]) - cfgstring += "node_id\t\t\t%d\n"%self.node_id - cfgstring += "node_ip\t\t\t%s\n"%self.node_ip + cfg_list.append(["config_file", self.config_file]) + cfg_list.append(["log_file", self.log_file]) - cfgstring += "config_file\t\t%s\n"%self.config_file - cfgstring += "log_file\t\t%s\n"%self.log_file + cfg_list.append(["input_stream", self.input_stream]) + cfg_list.append(["output_dir", self.output_dir]) + cfg_list.append(["output_header", self.output_header]) - cfgstring += "input_stream\t\t%s\n"%self.input_stream - cfgstring += "output_dir\t\t%s\n"%self.output_dir - cfgstring += "output_header\t\t%s\n"%self.output_header + cfg_list.append(["max_events", str(self.max_events)]) - cfgstring += "max_events\t\t%d\n"%self.max_events + return cfg_list + def format_config(self): + + cfgstring = "" + for cfg in self.config_list(): cfgstring += "%-30s %s\n"%tuple(cfg) return cfgstring + #cfgstring = "" + #cfgstring += "daq_dir\t\t\t%s\n"%self.daq_dir + #cfgstring += "ssh_id_file\t\t%s\n"%self.ssh_id_file + #cfgstring += "executable\t\t%s\n"%self.executable + # + #cfgstring += "run_number\t\t%d\n"%self.run_number + #cfgstring += "level1_id\t\t%d\n"%self.level1_id + #if (self.run_number): cfgstring += "process_id\t\t%d\n"%self.process_id + # + #cfgstring += "node_id\t\t\t%d\n"%self.node_id + #cfgstring += "node_ip\t\t\t%s\n"%self.node_ip + # + #cfgstring += "config_file\t\t%s\n"%self.config_file + #cfgstring += "log_file\t\t%s\n"%self.log_file + # + #cfgstring += "input_stream\t\t%s\n"%self.input_stream + #cfgstring += 
"output_dir\t\t%s\n"%self.output_dir + #cfgstring += "output_header\t\t%s\n"%self.output_header + # + #cfgstring += "max_events\t\t%d\n"%self.max_events + # + #return cfgstring + def write_config(self): if self.config_file == "unset": @@ -86,24 +117,28 @@ def create_level1(self): print "Level1::create_level1 - ERROR: unable to create new Level1 process in DB" return "error" - self.db.add_cfg_para_proc(self.process_id,"daq_dir", self.daq_dir) - self.db.add_cfg_para_proc(self.process_id,"ssh_id_file", self.ssh_id_file) - self.db.add_cfg_para_proc(self.process_id,"executable", self.executable) - - #self.db.add_cfg_para_proc(self.process_id,"run_number", repr(self.run_number)) - self.db.add_cfg_para_proc(self.process_id,"level1_id", repr(self.level1_id)) - - #self.db.add_cfg_para_proc(self.process_id,"node_id", repr(self.node_id)) - self.db.add_cfg_para_proc(self.process_id,"node_ip", self.node_ip) - - self.db.add_cfg_para_proc(self.process_id,"config_file", self.config_file) - self.db.add_cfg_para_proc(self.process_id,"log_file", self.log_file) - - self.db.add_cfg_para_proc(self.process_id,"input_stream", self.input_stream) - self.db.add_cfg_para_proc(self.process_id,"output_dir", self.output_dir) - self.db.add_cfg_para_proc(self.process_id,"output_header",self.output_header) - - self.db.add_cfg_para_proc(self.process_id,"max_events", repr(self.max_events)) + # Add all configuration parameters + for cfg in self.config_list(): + self.db.add_cfg_para_proc(self.process_id,cfg[0],cfg[1]) + + #self.db.add_cfg_para_proc(self.process_id,"daq_dir", self.daq_dir) + #self.db.add_cfg_para_proc(self.process_id,"ssh_id_file", self.ssh_id_file) + #self.db.add_cfg_para_proc(self.process_id,"executable", self.executable) + # + ##self.db.add_cfg_para_proc(self.process_id,"run_number", repr(self.run_number)) + #self.db.add_cfg_para_proc(self.process_id,"level1_id", repr(self.level1_id)) + # + ##self.db.add_cfg_para_proc(self.process_id,"node_id", repr(self.node_id)) + 
#self.db.add_cfg_para_proc(self.process_id,"node_ip", self.node_ip) + # + #self.db.add_cfg_para_proc(self.process_id,"config_file", self.config_file) + #self.db.add_cfg_para_proc(self.process_id,"log_file", self.log_file) + # + #self.db.add_cfg_para_proc(self.process_id,"input_stream", self.input_stream) + #self.db.add_cfg_para_proc(self.process_id,"output_dir", self.output_dir) + #self.db.add_cfg_para_proc(self.process_id,"output_header",self.output_header) + # + #self.db.add_cfg_para_proc(self.process_id,"max_events", repr(self.max_events)) return "ok" diff --git a/RunControl/code/Merger.py b/RunControl/code/Merger.py index d01175cc..0b9168e4 100644 --- a/RunControl/code/Merger.py +++ b/RunControl/code/Merger.py @@ -39,29 +39,58 @@ def set_default_config(self): self.input_list = "undefined" self.output_list = "undefined" - def format_config(self): + def config_list(self): - cfgstring = "" - cfgstring += "daq_dir\t\t\t%s\n"%self.daq_dir - cfgstring += "ssh_id_file\t\t%s\n"%self.ssh_id_file - cfgstring += "executable\t\t%s\n"%self.executable + cfg_list = [] + + cfg_list.append(["daq_dir", self.daq_dir]) + cfg_list.append(["ssh_id_file", self.ssh_id_file]) + cfg_list.append(["executable", self.executable]) - cfgstring += "run_number\t\t%d\n"%self.run_number - if (self.run_number): cfgstring += "process_id\t\t%d\n"%self.process_id + cfg_list.append(["run_number", str(self.run_number)]) + if (self.run_number): + cfg_list.append(["process_id", str(self.process_id)]) - cfgstring += "node_id\t\t\t%d\n"%self.node_id - cfgstring += "node_ip\t\t\t%s\n"%self.node_ip + cfg_list.append(["node_id", str(self.node_id)]) + cfg_list.append(["node_ip", self.node_ip]) - cfgstring += "config_file\t\t%s\n"%self.config_file - cfgstring += "log_file\t\t%s\n"%self.log_file + cfg_list.append(["config_file", self.config_file]) + cfg_list.append(["log_file", self.log_file]) - #cfgstring += "output_mode\t\t%s\n"%self.output_mode + #cfg_list.append(["output_mode", self.output_mode]) - 
cfgstring += "input_list\t\t%s\n"%self.input_list - cfgstring += "output_list\t\t%s\n"%self.output_list + cfg_list.append(["input_list", self.input_list]) + cfg_list.append(["output_list", self.output_list]) + + return cfg_list + + def format_config(self): + cfgstring = "" + for cfg in self.config_list(): cfgstring += "%-30s %s\n"%tuple(cfg) return cfgstring + #cfgstring = "" + #cfgstring += "daq_dir\t\t\t%s\n"%self.daq_dir + #cfgstring += "ssh_id_file\t\t%s\n"%self.ssh_id_file + #cfgstring += "executable\t\t%s\n"%self.executable + # + #cfgstring += "run_number\t\t%d\n"%self.run_number + #if (self.run_number): cfgstring += "process_id\t\t%d\n"%self.process_id + # + #cfgstring += "node_id\t\t\t%d\n"%self.node_id + #cfgstring += "node_ip\t\t\t%s\n"%self.node_ip + # + #cfgstring += "config_file\t\t%s\n"%self.config_file + #cfgstring += "log_file\t\t%s\n"%self.log_file + # + ##cfgstring += "output_mode\t\t%s\n"%self.output_mode + # + #cfgstring += "input_list\t\t%s\n"%self.input_list + #cfgstring += "output_list\t\t%s\n"%self.output_list + # + #return cfgstring + def write_config(self): if self.config_file == "unset": @@ -83,22 +112,26 @@ def create_merger(self): print "Merger::create_merger - ERROR: unable to create new Merger process in DB" return "error" - self.db.add_cfg_para_proc(self.process_id,"daq_dir", self.daq_dir) - self.db.add_cfg_para_proc(self.process_id,"ssh_id_file",self.ssh_id_file) - self.db.add_cfg_para_proc(self.process_id,"executable", self.executable) - - #self.db.add_cfg_para_proc(self.process_id,"run_number", repr(self.run_number)) - - #self.db.add_cfg_para_proc(self.process_id,"node_id", repr(self.node_id)) - self.db.add_cfg_para_proc(self.process_id,"node_ip", self.node_ip) - - self.db.add_cfg_para_proc(self.process_id,"config_file",self.config_file) - self.db.add_cfg_para_proc(self.process_id,"log_file", self.log_file) - - #self.db.add_cfg_para_proc(self.process_id,"output_mode",self.output_mode) - - 
self.db.add_cfg_para_proc(self.process_id,"input_list", self.input_list) - self.db.add_cfg_para_proc(self.process_id,"output_list",self.output_list) + # Add all configuration parameters + for cfg in self.config_list(): + self.db.add_cfg_para_proc(self.process_id,cfg[0],cfg[1]) + + #self.db.add_cfg_para_proc(self.process_id,"daq_dir", self.daq_dir) + #self.db.add_cfg_para_proc(self.process_id,"ssh_id_file",self.ssh_id_file) + #self.db.add_cfg_para_proc(self.process_id,"executable", self.executable) + # + ##self.db.add_cfg_para_proc(self.process_id,"run_number", repr(self.run_number)) + # + ##self.db.add_cfg_para_proc(self.process_id,"node_id", repr(self.node_id)) + #self.db.add_cfg_para_proc(self.process_id,"node_ip", self.node_ip) + # + #self.db.add_cfg_para_proc(self.process_id,"config_file",self.config_file) + #self.db.add_cfg_para_proc(self.process_id,"log_file", self.log_file) + # + ##self.db.add_cfg_para_proc(self.process_id,"output_mode",self.output_mode) + # + #self.db.add_cfg_para_proc(self.process_id,"input_list", self.input_list) + #self.db.add_cfg_para_proc(self.process_id,"output_list",self.output_list) return "ok" diff --git a/RunControl/code/Run.py b/RunControl/code/Run.py index 3e00079e..d0f40b0a 100644 --- a/RunControl/code/Run.py +++ b/RunControl/code/Run.py @@ -241,36 +241,36 @@ def read_setup(self): return "ok" - def format_config(self): - - cfgstring = "" - - cfgstring += "user_account\t\t%s\n"%self.user_account - cfgstring += "daq_dir\t\t\t%s\n"%self.daq_dir - cfgstring += "base_port_number\t\t%s\n"%self.base_port_number - cfgstring += "ssh_id_file\t\t%s\n"%self.ssh_id_file - - cfgstring += "daq_executable\t\t%s\n"%self.daq_executable - cfgstring += "trigger_executable\t%s\n"%self.trigger_executable - cfgstring += "merger_executable\t%s\n"%self.merger_executable - cfgstring += "level1_executable\t%s\n"%self.level1_executable - - cfgstring += "start_file\t\t%s\n"%self.start_file - cfgstring += "quit_file\t\t%s\n"%self.quit_file - cfgstring += 
"trig_start_file\t\t%s\n"%self.trig_start_file - cfgstring += "trig_stop_file\t\t%s\n"%self.trig_stop_file - cfgstring += "initok_file_head\t%s\n"%self.initok_file_head - cfgstring += "initfail_file_head\t%s\n"%self.initfail_file_head - cfgstring += "lock_file_head\t\t%s\n"%self.lock_file_head - cfgstring += "rawdata_dir\t\t%s\n"%self.rawdata_dir - - cfgstring += "run_number\t\t%d\n"%self.run_number - cfgstring += "run_name\t\t%s\n"%self.run_name - cfgstring += "run_dir\t\t\t%s\n"%self.run_dir - cfgstring += "run_type\t\t%s\n"%self.run_type - cfgstring += "run_user\t\t%s\n"%self.run_user - cfgstring += "run_comment_start\t%s\n"%self.run_comment_start - cfgstring += "setup\t\t\t%s\n"%self.setup + def config_list(self): + + cfg_list = [] + + cfg_list.append(["user_account", self.user_account]) + cfg_list.append(["daq_dir", self.daq_dir]) + cfg_list.append(["base_port_number", self.base_port_number]) + cfg_list.append(["ssh_id_file", self.ssh_id_file]) + + cfg_list.append(["daq_executable", self.daq_executable]) + cfg_list.append(["trigger_executable",self.trigger_executable]) + cfg_list.append(["merger_executable", self.merger_executable]) + cfg_list.append(["level1_executable", self.level1_executable]) + + cfg_list.append(["start_file", self.start_file]) + cfg_list.append(["quit_file", self.quit_file]) + cfg_list.append(["trig_start_file", self.trig_start_file]) + cfg_list.append(["trig_stop_file", self.trig_stop_file]) + cfg_list.append(["initok_file_head", self.initok_file_head]) + cfg_list.append(["initfail_file_head",self.initfail_file_head]) + cfg_list.append(["lock_file_head", self.lock_file_head]) + cfg_list.append(["rawdata_dir", self.rawdata_dir]) + + cfg_list.append(["run_number", str(self.run_number)]) + cfg_list.append(["run_name", self.run_name]) + cfg_list.append(["run_dir", self.run_dir]) + cfg_list.append(["run_type", self.run_type]) + cfg_list.append(["run_user", self.run_user]) + cfg_list.append(["run_comment_start", self.run_comment_start]) + 
cfg_list.append(["setup", self.setup]) s_board_list = "" for b in self.boardid_list: @@ -278,112 +278,193 @@ def format_config(self): s_board_list += " %d"%b else: s_board_list = "%d"%b - cfgstring += "board_list\t\t%s\n"%s_board_list + cfg_list.append(["board_list",s_board_list]) for b in self.boardid_list: for link in self.board_link_list: (board,host,port,node) = link if b == int(board): board_link = "%s %s %s %s"%(board,host,port,node) - cfgstring += "board_link\t\t%s\n"%board_link + cfg_list.append(["board_link","%s %s %s %s"%(board,host,port,node)]) - cfgstring += "config_dir\t\t%s\n"%self.config_dir - cfgstring += "config_file\t\t%s\n"%self.config_file - cfgstring += "config_file_head\t%s\n"%self.config_file_head + cfg_list.append(["config_dir", self.config_dir]) + cfg_list.append(["config_file", self.config_file]) + cfg_list.append(["config_file_head",self.config_file_head]) - cfgstring += "log_dir\t\t\t%s\n"%self.log_dir - cfgstring += "log_file_head\t\t%s\n"%self.log_file_head + cfg_list.append(["log_dir", self.log_dir]) + cfg_list.append(["log_file_head", self.log_file_head]) - cfgstring += "stream_dir\t\t%s\n"%self.stream_dir - cfgstring += "stream_head\t\t%s\n"%self.stream_head + cfg_list.append(["stream_dir", self.stream_dir]) + cfg_list.append(["stream_head", self.stream_head]) - cfgstring += "rawdata_dir\t\t%s\n"%self.rawdata_dir - cfgstring += "rawdata_head\t\t%s\n"%self.rawdata_head + cfg_list.append(["rawdata_dir", self.rawdata_dir]) + cfg_list.append(["rawdata_head", self.rawdata_head]) - cfgstring += "trigger_node\t\t%s\n"%self.trigger_node - #cfgstring += "trigger_mask\t\t%s\n"%self.trigger_mask + cfg_list.append(["trigger_node", self.trigger_node]) if self.merger_node: - cfgstring += "merger_node\t\t%s\n"%self.merger_node + cfg_list.append(["merger_node", self.merger_node]) if self.merger_node_list: - cfgstring += "merger_node_list\t\t%s\n"%" ".join(self.merger_node_list) - - cfgstring += "level1_nproc\t\t%d\n"%self.level1_nproc - cfgstring 
+= "level1_maxevt\t\t%d\n"%self.level1_maxevt - - cfgstring += "total_daq_time\t\t"+str(self.total_daq_time)+"\n" - - return cfgstring - - def create_run_in_db(self): - - # Create run in DB and save its configuration parameters - - self.db.create_run(self.run_number,self.run_name,self.run_user,self.run_type,self.run_comment_start) - - self.db.add_cfg_para_run(self.run_number,"user_account", self.user_account) - self.db.add_cfg_para_run(self.run_number,"daq_dir", self.daq_dir) - self.db.add_cfg_para_run(self.run_number,"base_port_number", self.base_port_number) - self.db.add_cfg_para_run(self.run_number,"ssh_id_file", self.ssh_id_file) - - self.db.add_cfg_para_run(self.run_number,"daq_executable", self.daq_executable) - self.db.add_cfg_para_run(self.run_number,"trigger_executable", self.trigger_executable) - self.db.add_cfg_para_run(self.run_number,"merger_executable", self.merger_executable) - self.db.add_cfg_para_run(self.run_number,"level1_executable", self.level1_executable) - - self.db.add_cfg_para_run(self.run_number,"start_file", self.start_file) - self.db.add_cfg_para_run(self.run_number,"quit_file", self.quit_file) - self.db.add_cfg_para_run(self.run_number,"trig_start_file", self.trig_start_file) - self.db.add_cfg_para_run(self.run_number,"trig_stop_file", self.trig_stop_file) - self.db.add_cfg_para_run(self.run_number,"initok_file_head", self.initok_file_head) - self.db.add_cfg_para_run(self.run_number,"initfail_file_head", self.initfail_file_head) - self.db.add_cfg_para_run(self.run_number,"lock_file_head", self.lock_file_head) - - self.db.add_cfg_para_run(self.run_number,"run_name",self.run_name) - self.db.add_cfg_para_run(self.run_number,"run_dir",self.run_dir) - self.db.add_cfg_para_run(self.run_number,"setup",self.setup) - - s_board_list = "" - for b in self.boardid_list: - if (s_board_list): - s_board_list += " %d"%b - else: - s_board_list = "%d"%b - self.db.add_cfg_para_run(self.run_number,"board_list",s_board_list) - - for b in self.boardid_list: 
- for link in self.board_link_list: - (board,host,port,node) = link - if b == int(board): - board_link = "%s %s %s %s"%(board,host,port,node) - self.db.add_cfg_para_run(self.run_number,"board_link",board_link) - - self.db.add_cfg_para_run(self.run_number,"config_dir", self.config_dir) - self.db.add_cfg_para_run(self.run_number,"config_file", self.config_file) - self.db.add_cfg_para_run(self.run_number,"config_file_head", self.config_file_head) + cfg_list.append(["merger_node_list"," ".join(self.merger_node_list)]) - self.db.add_cfg_para_run(self.run_number,"log_dir", self.log_dir) - self.db.add_cfg_para_run(self.run_number,"log_file_head", self.log_file_head) + cfg_list.append(["level1_nproc", str(self.level1_nproc)]) + cfg_list.append(["level1_maxevt", str(self.level1_maxevt)]) - self.db.add_cfg_para_run(self.run_number,"stream_dir", self.stream_dir) - self.db.add_cfg_para_run(self.run_number,"stream_head", self.stream_head) + cfg_list.append(["total_daq_time", str(self.total_daq_time)]) - self.db.add_cfg_para_run(self.run_number,"rawdata_dir", self.rawdata_dir) - self.db.add_cfg_para_run(self.run_number,"rawdata_head", self.rawdata_head) + return cfg_list - self.db.add_cfg_para_run(self.run_number,"trigger_node", self.trigger_node) + def format_config(self): - if self.merger_node: - self.db.add_cfg_para_run(self.run_number,"merger_node", self.merger_node) + cfgstring = "" + for cfg in self.config_list(): cfgstring += "%-30s %s\n"%tuple(cfg) + return cfgstring - if self.merger_node_list: - self.db.add_cfg_para_run(self.run_number,"merger_node_list", " ".join(self.merger_node_list)) + #cfgstring += "user_account\t\t%s\n"%self.user_account + #cfgstring += "daq_dir\t\t\t%s\n"%self.daq_dir + #cfgstring += "base_port_number\t\t%s\n"%self.base_port_number + #cfgstring += "ssh_id_file\t\t%s\n"%self.ssh_id_file + # + #cfgstring += "daq_executable\t\t%s\n"%self.daq_executable + #cfgstring += "trigger_executable\t%s\n"%self.trigger_executable + #cfgstring += 
"merger_executable\t%s\n"%self.merger_executable + #cfgstring += "level1_executable\t%s\n"%self.level1_executable + # + #cfgstring += "start_file\t\t%s\n"%self.start_file + #cfgstring += "quit_file\t\t%s\n"%self.quit_file + #cfgstring += "trig_start_file\t\t%s\n"%self.trig_start_file + #cfgstring += "trig_stop_file\t\t%s\n"%self.trig_stop_file + #cfgstring += "initok_file_head\t%s\n"%self.initok_file_head + #cfgstring += "initfail_file_head\t%s\n"%self.initfail_file_head + #cfgstring += "lock_file_head\t\t%s\n"%self.lock_file_head + #cfgstring += "rawdata_dir\t\t%s\n"%self.rawdata_dir + # + #cfgstring += "run_number\t\t%d\n"%self.run_number + #cfgstring += "run_name\t\t%s\n"%self.run_name + #cfgstring += "run_dir\t\t\t%s\n"%self.run_dir + #cfgstring += "run_type\t\t%s\n"%self.run_type + #cfgstring += "run_user\t\t%s\n"%self.run_user + #cfgstring += "run_comment_start\t%s\n"%self.run_comment_start + #cfgstring += "setup\t\t\t%s\n"%self.setup + # + #s_board_list = "" + #for b in self.boardid_list: + # if (s_board_list): + # s_board_list += " %d"%b + # else: + # s_board_list = "%d"%b + #cfgstring += "board_list\t\t%s\n"%s_board_list + # + #for b in self.boardid_list: + # for link in self.board_link_list: + # (board,host,port,node) = link + # if b == int(board): + # board_link = "%s %s %s %s"%(board,host,port,node) + # cfgstring += "board_link\t\t%s\n"%board_link + # + #cfgstring += "config_dir\t\t%s\n"%self.config_dir + #cfgstring += "config_file\t\t%s\n"%self.config_file + #cfgstring += "config_file_head\t%s\n"%self.config_file_head + # + #cfgstring += "log_dir\t\t\t%s\n"%self.log_dir + #cfgstring += "log_file_head\t\t%s\n"%self.log_file_head + # + #cfgstring += "stream_dir\t\t%s\n"%self.stream_dir + #cfgstring += "stream_head\t\t%s\n"%self.stream_head + # + #cfgstring += "rawdata_dir\t\t%s\n"%self.rawdata_dir + #cfgstring += "rawdata_head\t\t%s\n"%self.rawdata_head + # + #cfgstring += "trigger_node\t\t%s\n"%self.trigger_node + ##cfgstring += 
"trigger_mask\t\t%s\n"%self.trigger_mask + # + #if self.merger_node: + # cfgstring += "merger_node\t\t%s\n"%self.merger_node + # + #if self.merger_node_list: + # cfgstring += "merger_node_list\t\t%s\n"%" ".join(self.merger_node_list) + # + #cfgstring += "level1_nproc\t\t%d\n"%self.level1_nproc + #cfgstring += "level1_maxevt\t\t%d\n"%self.level1_maxevt + # + #cfgstring += "total_daq_time\t\t"+str(self.total_daq_time)+"\n" + # + #return cfgstring - self.db.add_cfg_para_run(self.run_number,"level1_nproc", str(self.level1_nproc)) - self.db.add_cfg_para_run(self.run_number,"level1_maxevt", str(self.level1_maxevt)) + def create_run_in_db(self): - self.db.add_cfg_para_run(self.run_number,"total_daq_time", self.total_daq_time) + # Create run in DB + self.db.create_run(self.run_number,self.run_name,self.run_type) + self.db.set_run_user(self.run_number,self.run_user) + self.db.set_run_comment_start(self.run_number,self.run_comment_start) + + # Add all configuration parameters + for cfg in self.config_list(): + self.db.add_cfg_para_run(self.run_number,cfg[0],cfg[1]) + + #self.db.add_cfg_para_run(self.run_number,"user_account", self.user_account) + #self.db.add_cfg_para_run(self.run_number,"daq_dir", self.daq_dir) + #self.db.add_cfg_para_run(self.run_number,"base_port_number", self.base_port_number) + #self.db.add_cfg_para_run(self.run_number,"ssh_id_file", self.ssh_id_file) + # + #self.db.add_cfg_para_run(self.run_number,"daq_executable", self.daq_executable) + #self.db.add_cfg_para_run(self.run_number,"trigger_executable", self.trigger_executable) + #self.db.add_cfg_para_run(self.run_number,"merger_executable", self.merger_executable) + #self.db.add_cfg_para_run(self.run_number,"level1_executable", self.level1_executable) + # + #self.db.add_cfg_para_run(self.run_number,"start_file", self.start_file) + #self.db.add_cfg_para_run(self.run_number,"quit_file", self.quit_file) + #self.db.add_cfg_para_run(self.run_number,"trig_start_file", self.trig_start_file) + 
#self.db.add_cfg_para_run(self.run_number,"trig_stop_file", self.trig_stop_file) + #self.db.add_cfg_para_run(self.run_number,"initok_file_head", self.initok_file_head) + #self.db.add_cfg_para_run(self.run_number,"initfail_file_head", self.initfail_file_head) + #self.db.add_cfg_para_run(self.run_number,"lock_file_head", self.lock_file_head) + # + #self.db.add_cfg_para_run(self.run_number,"run_name",self.run_name) + #self.db.add_cfg_para_run(self.run_number,"run_dir",self.run_dir) + #self.db.add_cfg_para_run(self.run_number,"setup",self.setup) + # + #s_board_list = "" + #for b in self.boardid_list: + # if (s_board_list): + # s_board_list += " %d"%b + # else: + # s_board_list = "%d"%b + #self.db.add_cfg_para_run(self.run_number,"board_list",s_board_list) + # + #for b in self.boardid_list: + # for link in self.board_link_list: + # (board,host,port,node) = link + # if b == int(board): + # board_link = "%s %s %s %s"%(board,host,port,node) + # self.db.add_cfg_para_run(self.run_number,"board_link",board_link) + # + #self.db.add_cfg_para_run(self.run_number,"config_dir", self.config_dir) + #self.db.add_cfg_para_run(self.run_number,"config_file", self.config_file) + #self.db.add_cfg_para_run(self.run_number,"config_file_head", self.config_file_head) + # + #self.db.add_cfg_para_run(self.run_number,"log_dir", self.log_dir) + #self.db.add_cfg_para_run(self.run_number,"log_file_head", self.log_file_head) + # + #self.db.add_cfg_para_run(self.run_number,"stream_dir", self.stream_dir) + #self.db.add_cfg_para_run(self.run_number,"stream_head", self.stream_head) + # + #self.db.add_cfg_para_run(self.run_number,"rawdata_dir", self.rawdata_dir) + #self.db.add_cfg_para_run(self.run_number,"rawdata_head", self.rawdata_head) + # + #self.db.add_cfg_para_run(self.run_number,"trigger_node", self.trigger_node) + # + #if self.merger_node: + # self.db.add_cfg_para_run(self.run_number,"merger_node", self.merger_node) + # + #if self.merger_node_list: + # 
self.db.add_cfg_para_run(self.run_number,"merger_node_list", " ".join(self.merger_node_list)) + # + #self.db.add_cfg_para_run(self.run_number,"level1_nproc", str(self.level1_nproc)) + #self.db.add_cfg_para_run(self.run_number,"level1_maxevt", str(self.level1_maxevt)) + # + #self.db.add_cfg_para_run(self.run_number,"total_daq_time", self.total_daq_time) # Create board structures in DB for adc in (self.adcboard_list): diff --git a/RunControl/code/Trigger.py b/RunControl/code/Trigger.py index e51d400d..09ae3809 100644 --- a/RunControl/code/Trigger.py +++ b/RunControl/code/Trigger.py @@ -152,91 +152,182 @@ def read_setup(self,setup): print l f.close() - def format_config(self): + def config_list(self): - cfgstring = "" - cfgstring += "daq_dir\t\t\t\t%s\n"%self.daq_dir - cfgstring += "ssh_id_file\t\t\t%s\n"%self.ssh_id_file - cfgstring += "executable\t\t\t%s\n"%self.executable - cfgstring += "start_file\t\t\t%s\n"%self.start_file - cfgstring += "quit_file\t\t\t%s\n"%self.quit_file - - cfgstring += "run_number\t\t\t%d\n"%self.run_number - #cfgstring += "process_mode\t\t\t%s\n"%self.process_mode - if (self.run_number): cfgstring += "process_id\t\t\t%d\n"%self.process_id - - cfgstring += "node_id\t\t\t\t%d\n"%self.node_id - cfgstring += "node_ip\t\t\t\t%s\n"%self.node_ip - - cfgstring += "config_file\t\t\t%s\n"%self.config_file - cfgstring += "log_file\t\t\t%s\n"%self.log_file - cfgstring += "lock_file\t\t\t%s\n"%self.lock_file - cfgstring += "initok_file\t\t\t%s\n"%self.initok_file - cfgstring += "initfail_file\t\t\t%s\n"%self.initfail_file - - cfgstring += "output_mode\t\t\t%s\n"%self.output_mode + cfg_list = [] + + cfg_list.append(["daq_dir", self.daq_dir]) + cfg_list.append(["ssh_id_file", self.ssh_id_file]) + cfg_list.append(["executable", self.executable]) + cfg_list.append(["start_file", self.start_file]) + cfg_list.append(["quit_file", self.quit_file]) + + cfg_list.append(["run_number", str(self.run_number)]) + #cfg_list.append(["process_mode", self.process_mode]) 
+ if (self.run_number): + cfg_list.append(["process_id", str(self.process_id)]) + + cfg_list.append(["node_id", str(self.node_id)]) + cfg_list.append(["node_ip", self.node_ip]) + + cfg_list.append(["config_file", self.config_file]) + cfg_list.append(["log_file", self.log_file]) + cfg_list.append(["lock_file", self.lock_file]) + cfg_list.append(["initok_file", self.initok_file]) + cfg_list.append(["initfail_file", self.initfail_file]) + + cfg_list.append(["output_mode", self.output_mode]) if self.output_mode == "STREAM": - cfgstring += "output_stream\t\t\t%s\n"%self.output_stream + cfg_list.append(["output_stream", self.output_stream]) elif self.output_mode == "FILE": - cfgstring += "data_dir\t\t\t%s\n"%self.data_dir - cfgstring += "data_file\t\t\t%s\n"%self.data_file - cfgstring += "file_max_duration\t\t%d\n"%self.file_max_duration - cfgstring += "file_max_size\t\t\t%d\n"%self.file_max_size - cfgstring += "file_max_events\t\t%d\n"%self.file_max_events + cfg_list.append(["data_dir", self.data_dir]) + cfg_list.append(["data_file", self.data_file]) + cfg_list.append(["file_max_duration", str(self.file_max_duration)]) + cfg_list.append(["file_max_size", str(self.file_max_size)]) + cfg_list.append(["file_max_events", str(self.file_max_events)]) - cfgstring += "total_daq_time\t\t\t%d\n"%self.total_daq_time + cfg_list.append(["total_daq_time", str(self.total_daq_time)]) - cfgstring += "trigger_addr\t\t\t%s\n"%self.trigger_addr - cfgstring += "trigger_port\t\t\t%d\n"%self.trigger_port + cfg_list.append(["trigger_addr", self.trigger_addr]) + cfg_list.append(["trigger_port", str(self.trigger_port)]) - cfgstring += "trigger_mask\t\t\t0x%02x\n"%self.trigger_mask - cfgstring += "busy_mask\t\t\t0x%02x\n"%self.busy_mask + cfg_list.append(["trigger_mask", "%#02x"%self.trigger_mask]) + cfg_list.append(["busy_mask", "%#02x"%self.busy_mask]) - cfgstring += "trigger0_delay\t0x%02x\n"%self.trigger0_delay + cfg_list.append(["trigger0_delay", "%#02x"%self.trigger0_delay]) - cfgstring += 
"correlated_trigger_delay\t0x%04x\n"%self.correlated_trigger_delay + cfg_list.append(["correlated_trigger_delay", "%#04x"%self.correlated_trigger_delay]) if (self.trigger_mask & 0x01): - #cfgstring += "trig0_scale_global\t\t%d\n"%self.trig0_scale_global - cfgstring += "trig0_scale_autopass\t\t%d\n"%self.trig0_scale_autopass + #cfg_list.append(["trig0_scale_global", str(self.trig0_scale_global)]) + cfg_list.append(["trig0_scale_autopass", str(self.trig0_scale_autopass)]) if (self.trigger_mask & 0x02): - cfgstring += "trig1_scale_global\t\t%d\n"%self.trig1_scale_global - cfgstring += "trig1_scale_autopass\t\t%d\n"%self.trig1_scale_autopass + cfg_list.append(["trig1_scale_global", str(self.trig1_scale_global)]) + cfg_list.append(["trig1_scale_autopass", str(self.trig1_scale_autopass)]) if (self.trigger_mask & 0x04): - cfgstring += "trig2_scale_global\t\t%d\n"%self.trig2_scale_global - cfgstring += "trig2_scale_autopass\t\t%d\n"%self.trig2_scale_autopass + cfg_list.append(["trig2_scale_global", str(self.trig2_scale_global)]) + cfg_list.append(["trig2_scale_autopass", str(self.trig2_scale_autopass)]) if (self.trigger_mask & 0x08): - cfgstring += "trig3_scale_global\t\t%d\n"%self.trig3_scale_global - cfgstring += "trig3_scale_autopass\t\t%d\n"%self.trig3_scale_autopass + cfg_list.append(["trig3_scale_global", str(self.trig3_scale_global)]) + cfg_list.append(["trig3_scale_autopass", str(self.trig3_scale_autopass)]) if (self.trigger_mask & 0x10): - cfgstring += "trig4_scale_global\t\t%d\n"%self.trig4_scale_global - cfgstring += "trig4_scale_autopass\t\t%d\n"%self.trig4_scale_autopass + cfg_list.append(["trig4_scale_global", str(self.trig4_scale_global)]) + cfg_list.append(["trig4_scale_autopass", str(self.trig4_scale_autopass)]) if (self.trigger_mask & 0x20): - cfgstring += "trig5_scale_global\t\t%d\n"%self.trig5_scale_global - cfgstring += "trig5_scale_autopass\t\t%d\n"%self.trig5_scale_autopass + cfg_list.append(["trig5_scale_global", str(self.trig5_scale_global)]) + 
cfg_list.append(["trig5_scale_autopass", str(self.trig5_scale_autopass)]) if (self.trigger_mask & 0x40): - cfgstring += "trig6_scale_global\t\t%d\n"%self.trig6_scale_global - cfgstring += "trig6_scale_autopass\t\t%d\n"%self.trig6_scale_autopass + cfg_list.append(["trig6_scale_global", str(self.trig6_scale_global)]) + cfg_list.append(["trig6_scale_autopass", str(self.trig6_scale_autopass)]) if (self.trigger_mask & 0x80): - cfgstring += "trig7_scale_global\t\t%d\n"%self.trig7_scale_global - cfgstring += "trig7_scale_autopass\t\t%d\n"%self.trig7_scale_autopass + cfg_list.append(["trig7_scale_global", str(self.trig7_scale_global)]) + cfg_list.append(["trig7_scale_autopass", str(self.trig7_scale_autopass)]) + + cfg_list.append(["timepix_shutter_delay", "%#02x"%self.timepix_shutter_delay]) + cfg_list.append(["timepix_shutter_width", "%#02x"%self.timepix_shutter_width]) - cfgstring += "timepix_shutter_delay\t\t0x%02x\n"%self.timepix_shutter_delay - cfgstring += "timepix_shutter_width\t\t0x%02x\n"%self.timepix_shutter_width + cfg_list.append(["daq_loop_delay", str(self.daq_loop_delay)]) - cfgstring += "daq_loop_delay\t\t\t%d\n"%self.daq_loop_delay + cfg_list.append(["debug_scale", str(self.debug_scale)]) - cfgstring += "debug_scale\t\t\t%d\n"%self.debug_scale + return cfg_list + + def format_config(self): + cfgstring = "" + for cfg in self.config_list(): cfgstring += "%-30s %s\n"%cfg return cfgstring + #cfgstring = "" + #cfgstring += "daq_dir\t\t\t\t%s\n"%self.daq_dir + #cfgstring += "ssh_id_file\t\t\t%s\n"%self.ssh_id_file + #cfgstring += "executable\t\t\t%s\n"%self.executable + #cfgstring += "start_file\t\t\t%s\n"%self.start_file + #cfgstring += "quit_file\t\t\t%s\n"%self.quit_file + # + #cfgstring += "run_number\t\t\t%d\n"%self.run_number + ##cfgstring += "process_mode\t\t\t%s\n"%self.process_mode + #if (self.run_number): cfgstring += "process_id\t\t\t%d\n"%self.process_id + # + #cfgstring += "node_id\t\t\t\t%d\n"%self.node_id + #cfgstring += 
"node_ip\t\t\t\t%s\n"%self.node_ip + # + #cfgstring += "config_file\t\t\t%s\n"%self.config_file + #cfgstring += "log_file\t\t\t%s\n"%self.log_file + #cfgstring += "lock_file\t\t\t%s\n"%self.lock_file + #cfgstring += "initok_file\t\t\t%s\n"%self.initok_file + #cfgstring += "initfail_file\t\t\t%s\n"%self.initfail_file + # + #cfgstring += "output_mode\t\t\t%s\n"%self.output_mode + #if self.output_mode == "STREAM": + # cfgstring += "output_stream\t\t\t%s\n"%self.output_stream + #elif self.output_mode == "FILE": + # cfgstring += "data_dir\t\t\t%s\n"%self.data_dir + # cfgstring += "data_file\t\t\t%s\n"%self.data_file + # cfgstring += "file_max_duration\t\t%d\n"%self.file_max_duration + # cfgstring += "file_max_size\t\t\t%d\n"%self.file_max_size + # cfgstring += "file_max_events\t\t%d\n"%self.file_max_events + # + #cfgstring += "total_daq_time\t\t\t%d\n"%self.total_daq_time + # + #cfgstring += "trigger_addr\t\t\t%s\n"%self.trigger_addr + #cfgstring += "trigger_port\t\t\t%d\n"%self.trigger_port + # + #cfgstring += "trigger_mask\t\t\t0x%02x\n"%self.trigger_mask + #cfgstring += "busy_mask\t\t\t0x%02x\n"%self.busy_mask + # + #cfgstring += "trigger0_delay\t0x%02x\n"%self.trigger0_delay + # + #cfgstring += "correlated_trigger_delay\t0x%04x\n"%self.correlated_trigger_delay + # + #if (self.trigger_mask & 0x01): + # #cfgstring += "trig0_scale_global\t\t%d\n"%self.trig0_scale_global + # cfgstring += "trig0_scale_autopass\t\t%d\n"%self.trig0_scale_autopass + # + #if (self.trigger_mask & 0x02): + # cfgstring += "trig1_scale_global\t\t%d\n"%self.trig1_scale_global + # cfgstring += "trig1_scale_autopass\t\t%d\n"%self.trig1_scale_autopass + # + #if (self.trigger_mask & 0x04): + # cfgstring += "trig2_scale_global\t\t%d\n"%self.trig2_scale_global + # cfgstring += "trig2_scale_autopass\t\t%d\n"%self.trig2_scale_autopass + # + #if (self.trigger_mask & 0x08): + # cfgstring += "trig3_scale_global\t\t%d\n"%self.trig3_scale_global + # cfgstring += 
"trig3_scale_autopass\t\t%d\n"%self.trig3_scale_autopass + # + #if (self.trigger_mask & 0x10): + # cfgstring += "trig4_scale_global\t\t%d\n"%self.trig4_scale_global + # cfgstring += "trig4_scale_autopass\t\t%d\n"%self.trig4_scale_autopass + # + #if (self.trigger_mask & 0x20): + # cfgstring += "trig5_scale_global\t\t%d\n"%self.trig5_scale_global + # cfgstring += "trig5_scale_autopass\t\t%d\n"%self.trig5_scale_autopass + # + #if (self.trigger_mask & 0x40): + # cfgstring += "trig6_scale_global\t\t%d\n"%self.trig6_scale_global + # cfgstring += "trig6_scale_autopass\t\t%d\n"%self.trig6_scale_autopass + # + #if (self.trigger_mask & 0x80): + # cfgstring += "trig7_scale_global\t\t%d\n"%self.trig7_scale_global + # cfgstring += "trig7_scale_autopass\t\t%d\n"%self.trig7_scale_autopass + # + #cfgstring += "timepix_shutter_delay\t\t0x%02x\n"%self.timepix_shutter_delay + #cfgstring += "timepix_shutter_width\t\t0x%02x\n"%self.timepix_shutter_width + # + #cfgstring += "daq_loop_delay\t\t\t%d\n"%self.daq_loop_delay + # + #cfgstring += "debug_scale\t\t\t%d\n"%self.debug_scale + # + #return cfgstring + def write_config(self): if self.config_file == "unset": @@ -259,84 +350,88 @@ def create_trigger(self): print "Trigger::create_trigger - ERROR: unable to create new Trigger process in DB" return "error" - self.db.add_cfg_para_proc(self.process_id,"daq_dir", self.daq_dir) - self.db.add_cfg_para_proc(self.process_id,"ssh_id_file", self.ssh_id_file) - self.db.add_cfg_para_proc(self.process_id,"executable", self.executable) - self.db.add_cfg_para_proc(self.process_id,"start_file", self.start_file) - self.db.add_cfg_para_proc(self.process_id,"quit_file", self.quit_file) - - #self.db.add_cfg_para_proc(self.process_id,"run_number", repr(self.run_number)) - #self.db.add_cfg_para_proc(self.process_id,"process_mode", self.process_mode) - - #self.db.add_cfg_para_proc(self.process_id,"node_id", repr(self.node_id)) - self.db.add_cfg_para_proc(self.process_id,"node_ip", self.node_ip) - - 
self.db.add_cfg_para_proc(self.process_id,"config_file", self.config_file) - self.db.add_cfg_para_proc(self.process_id,"log_file", self.log_file) - self.db.add_cfg_para_proc(self.process_id,"lock_file", self.lock_file) - self.db.add_cfg_para_proc(self.process_id,"initok_file", self.initok_file) - self.db.add_cfg_para_proc(self.process_id,"initfail_file", self.initfail_file) - - self.db.add_cfg_para_proc(self.process_id,"output_mode", self.output_mode) - if self.output_mode == "STREAM": - self.db.add_cfg_para_proc(self.process_id,"output_stream", self.output_stream) - elif self.output_mode == "FILE": - self.db.add_cfg_para_proc(self.process_id,"data_dir", self.data_dir) - self.db.add_cfg_para_proc(self.process_id,"data_file", self.data_file) - self.db.add_cfg_para_proc(self.process_id,"file_max_duration", self.file_max_duration) - self.db.add_cfg_para_proc(self.process_id,"file_max_size", self.file_max_size) - self.db.add_cfg_para_proc(self.process_id,"file_max_events", self.file_max_events) - - self.db.add_cfg_para_proc(self.process_id,"total_daq_time", repr(self.total_daq_time)) - - self.db.add_cfg_para_proc(self.process_id,"trigger_addr", self.trigger_addr) - self.db.add_cfg_para_proc(self.process_id,"trigger_port", repr(self.trigger_port)) - - self.db.add_cfg_para_proc(self.process_id,"trigger_mask", "%#02x"%self.trigger_mask) - self.db.add_cfg_para_proc(self.process_id,"busy_mask", "%#02x"%self.busy_mask) - - self.db.add_cfg_para_proc(self.process_id,"trigger0_delay", "%#02x"%self.trigger0_delay) - - self.db.add_cfg_para_proc(self.process_id,"correlated_trigger_delay", "%#04x"%self.correlated_trigger_delay) - - if (self.trigger_mask & 0x01): - #self.db.add_cfg_para_proc(self.process_id,"trig0_scale_global", repr(self.trig0_scale_global)) - self.db.add_cfg_para_proc(self.process_id,"trig0_scale_autopass", repr(self.trig0_scale_autopass)) - - if (self.trigger_mask & 0x02): - self.db.add_cfg_para_proc(self.process_id,"trig1_scale_global", 
repr(self.trig1_scale_global)) - self.db.add_cfg_para_proc(self.process_id,"trig1_scale_autopass", repr(self.trig1_scale_autopass)) - - if (self.trigger_mask & 0x04): - self.db.add_cfg_para_proc(self.process_id,"trig2_scale_global", repr(self.trig2_scale_global)) - self.db.add_cfg_para_proc(self.process_id,"trig2_scale_autopass", repr(self.trig2_scale_autopass)) - - if (self.trigger_mask & 0x08): - self.db.add_cfg_para_proc(self.process_id,"trig3_scale_global", repr(self.trig3_scale_global)) - self.db.add_cfg_para_proc(self.process_id,"trig3_scale_autopass", repr(self.trig3_scale_autopass)) - - if (self.trigger_mask & 0x10): - self.db.add_cfg_para_proc(self.process_id,"trig4_scale_global", repr(self.trig4_scale_global)) - self.db.add_cfg_para_proc(self.process_id,"trig4_scale_autopass", repr(self.trig4_scale_autopass)) - - if (self.trigger_mask & 0x20): - self.db.add_cfg_para_proc(self.process_id,"trig5_scale_global", repr(self.trig5_scale_global)) - self.db.add_cfg_para_proc(self.process_id,"trig5_scale_autopass", repr(self.trig5_scale_autopass)) - - if (self.trigger_mask & 0x40): - self.db.add_cfg_para_proc(self.process_id,"trig6_scale_global", repr(self.trig6_scale_global)) - self.db.add_cfg_para_proc(self.process_id,"trig6_scale_autopass", repr(self.trig6_scale_autopass)) - - if (self.trigger_mask & 0x80): - self.db.add_cfg_para_proc(self.process_id,"trig7_scale_global", repr(self.trig7_scale_global)) - self.db.add_cfg_para_proc(self.process_id,"trig7_scale_autopass", repr(self.trig7_scale_autopass)) - - self.db.add_cfg_para_proc(self.process_id,"timepix_shutter_delay", "%#02x"%self.timepix_shutter_delay) - self.db.add_cfg_para_proc(self.process_id,"timepix_shutter_width", "%#02x"%self.timepix_shutter_width) - - self.db.add_cfg_para_proc(self.process_id,"daq_loop_delay", repr(self.daq_loop_delay)) - - self.db.add_cfg_para_proc(self.process_id,"debug_scale", repr(self.debug_scale)) + # Add all configuration parameters + for cfg in self.config_list(): + 
self.db.add_cfg_para_proc(self.process_id,cfg[0],cfg[1]) + + #self.db.add_cfg_para_proc(self.process_id,"daq_dir", self.daq_dir) + #self.db.add_cfg_para_proc(self.process_id,"ssh_id_file", self.ssh_id_file) + #self.db.add_cfg_para_proc(self.process_id,"executable", self.executable) + #self.db.add_cfg_para_proc(self.process_id,"start_file", self.start_file) + #self.db.add_cfg_para_proc(self.process_id,"quit_file", self.quit_file) + # + ##self.db.add_cfg_para_proc(self.process_id,"run_number", repr(self.run_number)) + ##self.db.add_cfg_para_proc(self.process_id,"process_mode", self.process_mode) + # + ##self.db.add_cfg_para_proc(self.process_id,"node_id", repr(self.node_id)) + #self.db.add_cfg_para_proc(self.process_id,"node_ip", self.node_ip) + # + #self.db.add_cfg_para_proc(self.process_id,"config_file", self.config_file) + #self.db.add_cfg_para_proc(self.process_id,"log_file", self.log_file) + #self.db.add_cfg_para_proc(self.process_id,"lock_file", self.lock_file) + #self.db.add_cfg_para_proc(self.process_id,"initok_file", self.initok_file) + #self.db.add_cfg_para_proc(self.process_id,"initfail_file", self.initfail_file) + # + #self.db.add_cfg_para_proc(self.process_id,"output_mode", self.output_mode) + #if self.output_mode == "STREAM": + # self.db.add_cfg_para_proc(self.process_id,"output_stream", self.output_stream) + #elif self.output_mode == "FILE": + # self.db.add_cfg_para_proc(self.process_id,"data_dir", self.data_dir) + # self.db.add_cfg_para_proc(self.process_id,"data_file", self.data_file) + # self.db.add_cfg_para_proc(self.process_id,"file_max_duration", self.file_max_duration) + # self.db.add_cfg_para_proc(self.process_id,"file_max_size", self.file_max_size) + # self.db.add_cfg_para_proc(self.process_id,"file_max_events", self.file_max_events) + # + #self.db.add_cfg_para_proc(self.process_id,"total_daq_time", repr(self.total_daq_time)) + # + #self.db.add_cfg_para_proc(self.process_id,"trigger_addr", self.trigger_addr) + 
#self.db.add_cfg_para_proc(self.process_id,"trigger_port", repr(self.trigger_port)) + # + #self.db.add_cfg_para_proc(self.process_id,"trigger_mask", "%#02x"%self.trigger_mask) + #self.db.add_cfg_para_proc(self.process_id,"busy_mask", "%#02x"%self.busy_mask) + # + #self.db.add_cfg_para_proc(self.process_id,"trigger0_delay", "%#02x"%self.trigger0_delay) + # + #self.db.add_cfg_para_proc(self.process_id,"correlated_trigger_delay", "%#04x"%self.correlated_trigger_delay) + # + #if (self.trigger_mask & 0x01): + # #self.db.add_cfg_para_proc(self.process_id,"trig0_scale_global", repr(self.trig0_scale_global)) + # self.db.add_cfg_para_proc(self.process_id,"trig0_scale_autopass", repr(self.trig0_scale_autopass)) + # + #if (self.trigger_mask & 0x02): + # self.db.add_cfg_para_proc(self.process_id,"trig1_scale_global", repr(self.trig1_scale_global)) + # self.db.add_cfg_para_proc(self.process_id,"trig1_scale_autopass", repr(self.trig1_scale_autopass)) + # + #if (self.trigger_mask & 0x04): + # self.db.add_cfg_para_proc(self.process_id,"trig2_scale_global", repr(self.trig2_scale_global)) + # self.db.add_cfg_para_proc(self.process_id,"trig2_scale_autopass", repr(self.trig2_scale_autopass)) + # + #if (self.trigger_mask & 0x08): + # self.db.add_cfg_para_proc(self.process_id,"trig3_scale_global", repr(self.trig3_scale_global)) + # self.db.add_cfg_para_proc(self.process_id,"trig3_scale_autopass", repr(self.trig3_scale_autopass)) + # + #if (self.trigger_mask & 0x10): + # self.db.add_cfg_para_proc(self.process_id,"trig4_scale_global", repr(self.trig4_scale_global)) + # self.db.add_cfg_para_proc(self.process_id,"trig4_scale_autopass", repr(self.trig4_scale_autopass)) + # + #if (self.trigger_mask & 0x20): + # self.db.add_cfg_para_proc(self.process_id,"trig5_scale_global", repr(self.trig5_scale_global)) + # self.db.add_cfg_para_proc(self.process_id,"trig5_scale_autopass", repr(self.trig5_scale_autopass)) + # + #if (self.trigger_mask & 0x40): + # 
self.db.add_cfg_para_proc(self.process_id,"trig6_scale_global", repr(self.trig6_scale_global)) + # self.db.add_cfg_para_proc(self.process_id,"trig6_scale_autopass", repr(self.trig6_scale_autopass)) + # + #if (self.trigger_mask & 0x80): + # self.db.add_cfg_para_proc(self.process_id,"trig7_scale_global", repr(self.trig7_scale_global)) + # self.db.add_cfg_para_proc(self.process_id,"trig7_scale_autopass", repr(self.trig7_scale_autopass)) + # + #self.db.add_cfg_para_proc(self.process_id,"timepix_shutter_delay", "%#02x"%self.timepix_shutter_delay) + #self.db.add_cfg_para_proc(self.process_id,"timepix_shutter_width", "%#02x"%self.timepix_shutter_width) + # + #self.db.add_cfg_para_proc(self.process_id,"daq_loop_delay", repr(self.daq_loop_delay)) + # + #self.db.add_cfg_para_proc(self.process_id,"debug_scale", repr(self.debug_scale)) return "ok" From de0abe0c5554b094a9db4d232e71f85271651c4d Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Thu, 28 Nov 2019 17:16:55 +0100 Subject: [PATCH 30/64] PadmeDAQ: fixed DBINFO output --- PadmeDAQ/src/DAQ.c | 2 +- PadmeDAQ/src/ZSUP.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/PadmeDAQ/src/DAQ.c b/PadmeDAQ/src/DAQ.c index 0c30bb8b..9807eba9 100644 --- a/PadmeDAQ/src/DAQ.c +++ b/PadmeDAQ/src/DAQ.c @@ -108,7 +108,7 @@ int DAQ_connect () // sprintf(outstr,"%d",Config->board_sn); // db_add_cfg_para(Config->process_id,"board_sn",outstr); //} - printf("DBINFO - add_proc_cfg_para %d %s %d\n",Config->process_id,"board_sn",Config->board_sn); + printf("DBINFO - add_proc_config_para %d %s %d\n",Config->process_id,"board_sn",Config->board_sn); return 0; diff --git a/PadmeDAQ/src/ZSUP.c b/PadmeDAQ/src/ZSUP.c index fc319a51..07ba075a 100644 --- a/PadmeDAQ/src/ZSUP.c +++ b/PadmeDAQ/src/ZSUP.c @@ -225,7 +225,7 @@ int ZSUP_readdata () // sprintf(outstr,"%u",board_sn); // db_add_cfg_para(Config->process_id,"board_sn",outstr); //} - printf("DBINFO - add_proc_cfg_para %d %s %u\n",Config->process_id,"board_sn",board_sn); + 
printf("DBINFO - add_proc_config_para %d %s %u\n",Config->process_id,"board_sn",board_sn); // Fourth line: start of file time tag unsigned int start_time; From 4ae1cdb73a03403f3a29d1e33436f0f673a16669 Mon Sep 17 00:00:00 2001 From: Emanuele Leonardi Date: Thu, 28 Nov 2019 17:57:40 +0100 Subject: [PATCH 31/64] RunControl: added method to manage DBINFO lines --- RunControl/code/ADCBoard.py | 20 +- RunControl/code/Level1.py | 9 +- RunControl/code/Merger.py | 9 +- RunControl/code/PadmeDB.py | 353 ++++++++++++++++++++++++++-- RunControl/code/Run.py | 38 ++- RunControl/code/RunControlServer.py | 126 +++++----- RunControl/code/Trigger.py | 11 +- 7 files changed, 466 insertions(+), 100 deletions(-) diff --git a/RunControl/code/ADCBoard.py b/RunControl/code/ADCBoard.py index db0de3b8..f9b9ef8b 100644 --- a/RunControl/code/ADCBoard.py +++ b/RunControl/code/ADCBoard.py @@ -616,7 +616,8 @@ def start_daq(self): return 0 # Tag start of process in DB - if self.run_number: self.db.set_process_time_create(self.proc_daq_id) + if self.run_number: + self.db.set_process_time_create(self.proc_daq_id,self.db.now_str()) # Return process id return self.process_daq.pid @@ -629,7 +630,8 @@ def stop_daq(self): # Process exited: clean up defunct process and close log file self.process_daq.wait() self.log_handle_daq.close() - if self.run_number: self.db.set_process_time_end(self.proc_daq_id) + if self.run_number: + self.db.set_process_time_end(self.proc_daq_id,self.db.now_str()) return True time.sleep(0.5) @@ -651,7 +653,8 @@ def stop_daq(self): # Process exited: clean up defunct process and close log file self.process_daq.wait() self.log_handle_daq.close() - if self.run_number: self.db.set_process_time_end(self.proc_daq_id) + if self.run_number: + self.db.set_process_time_end(self.proc_daq_id,self.db.now_str()) return True time.sleep(0.5) @@ -662,7 +665,7 @@ def stop_daq(self): if self.process_daq.poll() != None: self.process_daq.wait() self.log_handle_daq.close() - if self.run_number: 
self.db.set_process_time_end(self.proc_daq_id) + if self.run_number: self.db.set_process_time_end(self.proc_daq_id,self.db.now_str()) return False def start_zsup(self): @@ -688,7 +691,8 @@ def start_zsup(self): return 0 # Tag start of process in DB - if self.run_number: self.db.set_process_time_create(self.proc_zsup_id) + if self.run_number: + self.db.set_process_time_create(self.proc_zsup_id,self.db.now_str()) # Return process id return self.process_zsup.pid @@ -703,7 +707,8 @@ def stop_zsup(self): # Process exited: clean up defunct process and close log file self.process_zsup.wait() self.log_handle_zsup.close() - if self.run_number: self.db.set_process_time_end(self.proc_zsup_id) + if self.run_number: + self.db.set_process_time_end(self.proc_zsup_id,self.db.now_str()) return True time.sleep(0.5) @@ -714,5 +719,6 @@ def stop_zsup(self): if self.process_zsup.poll() != None: self.process_zsup.wait() self.log_handle_zsup.close() - if self.run_number: self.db.set_process_time_end(self.proc_zsup_id) + if self.run_number: + self.db.set_process_time_end(self.proc_zsup_id,self.db.now_str()) return False diff --git a/RunControl/code/Level1.py b/RunControl/code/Level1.py index 792bf776..cd72b63b 100644 --- a/RunControl/code/Level1.py +++ b/RunControl/code/Level1.py @@ -165,7 +165,8 @@ def start_level1(self): return 0 # Tag start of process in DB - if self.run_number: self.db.set_process_time_create(self.process_id) + if self.run_number: + self.db.set_process_time_create(self.process_id,self.db.now_str()) # Return process id return self.process.pid @@ -180,7 +181,8 @@ def stop_level1(self): # Process exited: clean up defunct process and close log file self.process.wait() self.log_handle.close() - if self.run_number: self.db.set_process_time_end(self.process_id) + if self.run_number: + self.db.set_process_time_end(self.process_id,self.db.now_str()) retur True time.sleep(1) @@ -192,5 +194,6 @@ def stop_level1(self): self.process.wait() self.log_handle.close() - if 
self.run_number: self.db.set_process_time_end(self.process_id) + if self.run_number: + self.db.set_process_time_end(self.process_id,self.db.now_str()) return False diff --git a/RunControl/code/Merger.py b/RunControl/code/Merger.py index 0b9168e4..4b56c5bf 100644 --- a/RunControl/code/Merger.py +++ b/RunControl/code/Merger.py @@ -158,7 +158,8 @@ def start_merger(self): return 0 # Tag start of process in DB - if self.run_number: self.db.set_process_time_create(self.process_id) + if self.run_number: + self.db.set_process_time_create(self.process_id,self.db.now_str()) # Return process id return self.process.pid @@ -171,7 +172,8 @@ def stop_merger(self): # Process exited: clean up defunct process and close log file self.process.wait() self.log_handle.close() - if self.run_number: self.db.set_process_time_end(self.process_id) + if self.run_number: + self.db.set_process_time_end(self.process_id,self.db.now_str()) return True time.sleep(1) @@ -183,5 +185,6 @@ def stop_merger(self): self.process.wait() self.log_handle.close() - if self.run_number: self.db.set_process_time_end(self.process_id) + if self.run_number: + self.db.set_process_time_end(self.process_id,self.db.now_str()) return False diff --git a/RunControl/code/PadmeDB.py b/RunControl/code/PadmeDB.py index 453c5277..6130acf6 100644 --- a/RunControl/code/PadmeDB.py +++ b/RunControl/code/PadmeDB.py @@ -14,6 +14,53 @@ def __init__(self): self.DB_PASSWD = os.getenv('PADME_DB_PASSWD','unknown') self.DB_NAME = os.getenv('PADME_DB_NAME' ,'PadmeDAQ') + # DB codes for run status + self.DB_RUN_STATUS_INITIALIZED = 1 + self.DB_RUN_STATUS_RUNNING = 2 + self.DB_RUN_STATUS_END_OK = 3 + self.DB_RUN_STATUS_ABORTED = 4 + self.DB_RUN_STATUS_INIT_ERROR = 5 + self.DB_RUN_STATUS_END_ERROR = 6 + self.DB_RUN_STATUS_UNKNOWN = 7 + + # Create regexp used to decode DBINFO lines + + # file_create + self.re_file_create = re.compile("^\s*DBINFO\s+-\s+file_create\s+(\S+)\s+(\S+)\s+(\d+)\s+(\d+)\s+(\d+)\s*$") + + # file_set_time_open