diff --git a/server/recceiver/announce.py b/server/recceiver/announce.py index ab18a27..25a8cc9 100644 --- a/server/recceiver/announce.py +++ b/server/recceiver/announce.py @@ -4,9 +4,9 @@ import struct from twisted.internet import protocol -from twisted.logger import Logger +import logging -_log = Logger(__name__) +_log = logging.getLogger(__name__) _Ann = struct.Struct('>HH4sHHI') @@ -53,14 +53,14 @@ def sendOne(self): self.D = self.reactor.callLater(self.delay, self.sendOne) for A in self.udps: try: - _log.debug('announce to {s}',s=A) + _log.debug('announce to {s}'.format(s=A)) self.transport.write(self.msg, A) try: self.udpErr.remove(A) - _log.warn('announce OK to {s}',s=A) + _log.warning('announce OK to {s}'.format(s=A)) except KeyError: pass except: if A not in self.udpErr: self.udpErr.add(A) - _log.exception('announce Error to {s}',s=A) + _log.exception('announce Error to {s}'.format(s=A)) diff --git a/server/recceiver/application.py b/server/recceiver/application.py index b5d962e..857dd09 100644 --- a/server/recceiver/application.py +++ b/server/recceiver/application.py @@ -10,14 +10,13 @@ from twisted.internet import defer from twisted.internet.error import CannotListenError from twisted.application import service -from twisted.logger import Logger from .recast import CastFactory from .udpbcast import SharedUDP from .announce import Announcer from .processors import ProcessorController -_log = Logger(__name__) +_log = logging.getLogger(__name__) class Log2Twisted(logging.StreamHandler): """Print logging module stream to the twisted log @@ -90,7 +89,7 @@ def privilegedStartService(self): # Find out which port is in use addr = self.tcp.getHost() - _log.info('RecService listening on {addr}', addr=addr) + _log.info('RecService listening on {addr}'.format(addr=addr)) self.key = random.randint(0,0xffffffff) @@ -138,7 +137,7 @@ def makeService(self, opts): lvlname = conf.get('loglevel', 'WARN') lvl = logging.getLevelName(lvlname) if not isinstance(lvl, (int, )): 
- print("Invalid loglevel", lvlname) + print("Invalid loglevel {}. Setting to WARN level instead.".format(lvlname)) lvl = logging.WARN fmt = conf.get('logformat', "%(levelname)s:%(name)s %(message)s") diff --git a/server/recceiver/cfstore.py b/server/recceiver/cfstore.py index fbb9116..1c8e2c7 100755 --- a/server/recceiver/cfstore.py +++ b/server/recceiver/cfstore.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- -from twisted.logger import Logger +import logging import socket -_log = Logger(__name__) +_log = logging.getLogger(__name__) from zope.interface import implementer @@ -39,7 +39,7 @@ @implementer(interfaces.IProcessor) class CFProcessor(service.Service): def __init__(self, name, conf): - _log.info("CF_INIT {name}", name=name) + _log.info("CF_INIT {name}".format(name=name)) self.name, self.conf = name, conf self.channel_dict = defaultdict(list) self.iocs = dict() @@ -156,7 +156,7 @@ def waitForThread(_ignored): def chainError(err): if not err.check(defer.CancelledError): - _log.error("CF_COMMIT FAILURE: {s}", s=err) + _log.error("CF_COMMIT FAILURE: {s}".format(s=err)) if self.cancelled: if not err.check(defer.CancelledError): raise defer.CancelledError() @@ -177,7 +177,7 @@ def _commitWithThread(self, TR): if not self.running: raise defer.CancelledError('CF Processor is not running (TR: {host}:{port})', host=TR.src.host, port=TR.src.port) - _log.info("CF_COMMIT: {TR}", TR=TR) + _log.info("CF_COMMIT: {TR}".format(TR=TR)) """ a dictionary with a list of records with their associated property info pvInfo @@ -203,7 +203,7 @@ def _commitWithThread(self, TR): for rid, (recinfos) in TR.recinfos.items(): # find intersection of these sets if rid not in pvInfo: - _log.warn('IOC: {iocid}: PV not found for recinfo with RID: {rid}', iocid=iocid, rid=rid) + _log.warning('IOC: {iocid}: PV not found for recinfo with RID: {rid}'.format(iocid=iocid, rid=rid)) continue recinfo_wl = [p for p in self.whitelist if p in recinfos.keys()] if recinfo_wl: @@ -215,7 +215,7 @@ def 
_commitWithThread(self, TR): for rid, alias in TR.aliases.items(): if rid not in pvInfo: - _log.warn('IOC: {iocid}: PV not found for alias with RID: {rid}', iocid=iocid, rid=rid) + _log.warning('IOC: {iocid}: PV not found for alias with RID: {rid}'.format(iocid=iocid, rid=rid)) continue pvInfo[rid]['aliases'] = alias @@ -228,19 +228,19 @@ def _commitWithThread(self, TR): pvInfo[rid]['infoProperties'] = list() pvInfo[rid]['infoProperties'].append(property) else: - _log.debug('EPICS environment var {env_var} listed in environment_vars setting list not found in this IOC: {iocName}', env_var=epics_env_var_name, iocName=iocName) + _log.debug('EPICS environment var {env_var} listed in environment_vars setting list not found in this IOC: {iocName}'.format(env_var=epics_env_var_name, iocName=iocName)) delrec = list(TR.delrec) - _log.debug("Delete records: {s}", s=delrec) + _log.debug("Delete records: {s}".format(s=delrec)) pvInfoByName = {} for rid, (info) in pvInfo.items(): if info["pvName"] in pvInfoByName: - _log.warn("Commit contains multiple records with PV name: {pv} ({iocid})", pv=info["pvName"], iocid=iocid) + _log.warning("Commit contains multiple records with PV name: {pv} ({iocid})".format(pv=info["pvName"], iocid=iocid)) continue pvInfoByName[info["pvName"]] = info - _log.debug("Add record: {rid}: {info}", rid=rid, info=info) + _log.debug("Add record: {rid}: {info}".format(rid=rid, info=info)) if TR.initial: """Add IOC to source list """ @@ -301,13 +301,13 @@ def clean_service(self): _log.info("CF Clean Completed") return except RequestException as e: - _log.error("Clean service failed: {s}", s=e) + _log.error("Clean service failed: {s}".format(s=e)) retry_seconds = min(60, sleep) - _log.info("Clean service retry in {retry_seconds} seconds", retry_seconds=retry_seconds) + _log.info("Clean service retry in {retry_seconds} seconds".format(retry_seconds=retry_seconds)) time.sleep(retry_seconds) sleep *= 1.5 if self.running == 0 and sleep >= retry_limit: - 
_log.info("Abandoning clean after {retry_limit} seconds", retry_limit=retry_limit) + _log.info("Abandoning clean after {retry_limit} seconds".format(retry_limit=retry_limit)) return def get_active_channels(self, recceiverid): @@ -317,8 +317,8 @@ def clean_channels(self, owner, channels): new_channels = [] for ch in channels or []: new_channels.append(ch[u'name']) - _log.info("Total channels to update: {nChannels}", nChannels=len(new_channels)) - _log.debug('Update "pvStatus" property to "Inactive" for {n_channels} channels', n_channels=len(new_channels)) + _log.info("Total channels to update: {nChannels}".format(nChannels=len(new_channels))) + _log.debug('Update "pvStatus" property to "Inactive" for {n_channels} channels'.format(n_channels=len(new_channels))) self.client.update(property={u'name': 'pvStatus', u'owner': owner, u'value': "Inactive"}, channelNames=new_channels) @@ -339,7 +339,7 @@ def dict_to_file(dict, iocs, conf): def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, owner, iocTime): - _log.info("CF Update IOC: {iocid}", iocid=iocid) + _log.info("CF Update IOC: {iocid}".format(iocid=iocid)) # Consider making this function a class methed then 'proc' simply becomes 'self' client = proc.client @@ -356,7 +356,7 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow iocTime = iocs[iocid]["time"] iocIP = iocs[iocid]["iocIP"] else: - _log.warn('IOC Env Info not found: {iocid}', iocid=iocid) + _log.warning('IOC Env Info not found: {iocid}'.format(iocid=iocid)) if hostName is None or iocName is None: raise Exception('missing hostName or iocName') @@ -366,7 +366,7 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow channels = [] """A list of channels in channelfinder with the associated hostName and iocName""" - _log.debug('Find existing channels by IOCID: {iocid}', iocid=iocid) + _log.debug('Find existing channels by IOCID: {iocid}'.format(iocid=iocid)) old = 
client.findByArgs(prepareFindArgs(conf, [('iocid', iocid)])) if proc.cancelled: raise defer.CancelledError() @@ -381,7 +381,7 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow if (conf.get('recordType', 'default') == 'on'): ch[u'properties'] = __merge_property_lists(ch[u'properties'].append({u'name': 'recordType', u'owner': owner, u'value': iocs[channels_dict[ch[u'name']][-1]]["recordType"]}), ch[u'properties']) channels.append(ch) - _log.debug("Add existing channel to previous IOC: {s}", s=channels[-1]) + _log.debug("Add existing channel to previous IOC: {s}".format(s=channels[-1])) """In case alias exist, also delete them""" if (conf.get('alias', 'default') == 'on'): if ch[u'name'] in pvInfoByName and "aliases" in pvInfoByName[ch[u'name']]: @@ -393,7 +393,7 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow if (conf.get('recordType', 'default') == 'on'): ch[u'properties'] = __merge_property_lists(ch[u'properties'].append({u'name': 'recordType', u'owner': owner, u'value': iocs[channels_dict[a[u'name']][-1]]["recordType"]}), ch[u'properties']) channels.append(a) - _log.debug("Add existing alias to previous IOC: {s}", s=channels[-1]) + _log.debug("Add existing alias to previous IOC: {s}".format(s=channels[-1])) else: """Orphan the channel : mark as inactive, keep the old hostName and iocName""" @@ -401,7 +401,7 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow {u'name': 'time', u'owner': owner, u'value': iocTime}], ch[u'properties']) channels.append(ch) - _log.debug("Add orphaned channel with no IOC: {s}", s=channels[-1]) + _log.debug("Add orphaned channel with no IOC: {s}".format(s=channels[-1])) """Also orphan any alias""" if (conf.get('alias', 'default') == 'on'): if ch[u'name'] in pvInfoByName and "aliases" in pvInfoByName[ch[u'name']]: @@ -410,7 +410,7 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow {u'name': 'time', u'owner': 
owner, u'value': iocTime}], a[u'properties']) channels.append(a) - _log.debug("Add orphaned alias with no IOC: {s}", s=channels[-1]) + _log.debug("Add orphaned alias with no IOC: {s}".format(s=channels[-1])) else: if ch[u'name'] in new: # case: channel in old and new """ @@ -421,7 +421,7 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow {u'name': 'time', u'owner': owner, u'value': iocTime}], ch[u'properties']) channels.append(ch) - _log.debug("Add existing channel with same IOC: {s}", s=channels[-1]) + _log.debug("Add existing channel with same IOC: {s}".format(s=channels[-1])) new.remove(ch[u'name']) """In case, alias exist""" @@ -445,7 +445,7 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow u'owner': owner, u'properties': aprops}) new.remove(a[u'name']) - _log.debug("Add existing alias with same IOC: {s}", s=channels[-1]) + _log.debug("Add existing alias with same IOC: {s}".format(s=channels[-1])) # now pvNames contains a list of pv's new on this host/ioc """A dictionary representing the current channelfinder information associated with the pvNames""" existingChannels = {} @@ -468,7 +468,7 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow searchStrings.append(searchString) for eachSearchString in searchStrings: - _log.debug('Find existing channels by name: {search}', search=eachSearchString) + _log.debug('Find existing channels by name: {search}'.format(search=eachSearchString)) for ch in client.findByArgs(prepareFindArgs(conf, [('~name', eachSearchString)])): existingChannels[ch["name"]] = ch if proc.cancelled: @@ -486,7 +486,7 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow existingChannel = existingChannels[pv] existingChannel["properties"] = __merge_property_lists(newProps, existingChannel["properties"]) channels.append(existingChannel) - _log.debug("Add existing channel with different IOC: {s}", s=channels[-1]) + 
_log.debug("Add existing channel with different IOC: {s}".format(s=channels[-1])) """in case, alias exists, update their properties too""" if (conf.get('alias', 'default') == 'on'): if pv in pvInfoByName and "aliases" in pvInfoByName[pv]: @@ -502,14 +502,14 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow channels.append({u'name': a, u'owner': owner, u'properties': alProps}) - _log.debug("Add existing alias with different IOC: {s}", s=channels[-1]) + _log.debug("Add existing alias with different IOC: {s}".format(s=channels[-1])) else: """New channel""" channels.append({u'name': pv, u'owner': owner, u'properties': newProps}) - _log.debug("Add new channel: {s}", s=channels[-1]) + _log.debug("Add new channel: {s}".format(s=channels[-1])) if (conf.get('alias', 'default') == 'on'): if pv in pvInfoByName and "aliases" in pvInfoByName[pv]: alProps = [{u'name': 'alias', u'owner': owner, u'value': pv}] @@ -519,8 +519,8 @@ def __updateCF__(proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow channels.append({u'name': a, u'owner': owner, u'properties': alProps}) - _log.debug("Add new alias: {s}", s=channels[-1]) - _log.info("Total channels to update: {nChannels} {iocName}", nChannels=len(channels), iocName=iocName) + _log.debug("Add new alias: {s}".format(s=channels[-1])) + _log.info("Total channels to update: {nChannels} {iocName}".format(nChannels=len(channels), iocName=iocName)) if len(channels) != 0: client.set(channels=channels) else: @@ -574,7 +574,7 @@ def prepareFindArgs(conf, args, size=0): def poll(update, proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, owner, iocTime): - _log.info("Polling {iocName} begins...", iocName=iocName) + _log.info("Polling {iocName} begins...".format(iocName=iocName)) sleep = 1 success = False while not success: @@ -583,10 +583,10 @@ def poll(update, proc, pvInfoByName, delrec, hostName, iocName, iocIP, iocid, ow success = True return success except RequestException as e: - 
_log.error("ChannelFinder update failed: {s}", s=e) + _log.error("ChannelFinder update failed: {s}".format(s=e)) retry_seconds = min(60, sleep) - _log.info("ChannelFinder update retry in {retry_seconds} seconds", retry_seconds=retry_seconds) + _log.info("ChannelFinder update retry in {retry_seconds} seconds".format(retry_seconds=retry_seconds)) #_log.debug(str(channels_dict)) time.sleep(retry_seconds) sleep *= 1.5 - _log.info("Polling {iocName} complete", iocName=iocName) + _log.info("Polling {iocName} complete".format(iocName=iocName)) diff --git a/server/recceiver/dbstore.py b/server/recceiver/dbstore.py index 852900d..189b7f6 100644 --- a/server/recceiver/dbstore.py +++ b/server/recceiver/dbstore.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import itertools -from twisted.logger import Logger +import logging from zope.interface import implementer @@ -11,7 +11,7 @@ from . import interfaces -_log = Logger(__name__) +_log = logging.getLogger(__name__) __all__ = ['DBProcessor'] diff --git a/server/recceiver/processors.py b/server/recceiver/processors.py index b0bbc7c..c633ff5 100644 --- a/server/recceiver/processors.py +++ b/server/recceiver/processors.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -from twisted.logger import Logger +import logging import sys from zope.interface import implementer @@ -17,7 +17,7 @@ from twisted.application import service from . 
import interfaces -_log = Logger(__name__) +_log = logging.getLogger(__name__) __all__ = [ 'ShowProcessor', @@ -77,7 +77,7 @@ def __init__(self, cfile=None): plugs = {} for plug in plugin.getPlugins(interfaces.IProcessorFactory): - _log.debug('Available plugin: {name}', name=plug.name) + _log.debug('Available plugin: {name}'.format(name=plug.name)) plugs[plug.name] = plug self.procs = [] @@ -109,13 +109,13 @@ def commit(self, trans): def punish(err, B): if err.check(defer.CancelledError): - _log.debug('Cancel processing: {name}: {trans}', name=B.name, trans=trans) + _log.debug('Cancel processing: {name}: {trans}'.format(name=B.name, trans=trans)) return err try: self.procs.remove(B) - _log.error('Remove processor: {name}: {err}', name=B.name, err=err) + _log.error('Remove processor: {name}: {err}'.format(name=B.name, err=err)) except: - _log.debug('Remove processor: {name}: aleady removed', name=B.name) + _log.debug('Remove processor: {name}: already removed'.format(name=B.name)) return err defers = [ defer.maybeDeferred(P.commit, trans).addErrback(punish, P) for P in self.procs ] @@ -136,7 +136,7 @@ def __init__(self, name, opts): def startService(self): service.Service.startService(self) - _log.info("Show processor '{processor}' starting", processor=self.name) + _log.info("Show processor '{processor}' starting".format(processor=self.name)) def commit(self, transaction): @@ -162,25 +162,25 @@ def releaseLock(result): def _commit(self, trans): - _log.debug("# Show processor '{name}' commit", name=self.name) - _log.info("# From {host}:{port}", host=trans.src.host,port=trans.src.port) + _log.debug("# Show processor '{name}' commit".format(name=self.name)) + _log.info("# From {host}:{port}".format(host=trans.src.host,port=trans.src.port)) if not trans.connected: _log.info("# connection lost") for item in trans.infos.items(): - _log.info(" epicsEnvSet('{name}','{value}')", name=item[0], value=item[1]) + _log.info(" epicsEnvSet('{name}','{value}')".format(name=item[0], 
value=item[1])) for rid, (rname, rtype) in trans.addrec.items(): - _log.info(" record({rtype}, \"{rname}\") {", rtype=rtype, rname=rname) + _log.info(" record({rtype}, \"{rname}\") {{".format(rtype=rtype, rname=rname)) for alias in trans.aliases.get(rid, []): - _log.info(" alias(\"{alias}\")", alias=alias) + _log.info(" alias(\"{alias}\")".format(alias=alias)) for item in trans.recinfos.get(rid, {}).items(): - _log.info(" info({name},\"{value}\")", name=item[0], value=[1]) + _log.info(" info({name},\"{value}\")".format(name=item[0], value=item[1])) _log.info(" }") yield _log.info("# End") def stopService(self): service.Service.stopService(self) - _log.info("Show processor '{name}' stopping", name=self.name) + _log.info("Show processor '{name}' stopping".format(name=self.name)) @implementer(plugin.IPlugin, interfaces.IProcessorFactory) diff --git a/server/recceiver/recast.py b/server/recceiver/recast.py index 893dabd..1dcd858 100644 --- a/server/recceiver/recast.py +++ b/server/recceiver/recast.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- -from twisted.logger import Logger -_log = Logger(__name__) +import logging +_log = logging.getLogger(__name__) import sys, time @@ -109,7 +109,7 @@ def recvHeader(self, data): self.restartPingTimer() magic, msgid, blen = _Head.unpack(data) if magic!=_M: - _log.error('Protocol error! Bad magic {magic}',magic=magic) + _log.error('Protocol error! Bad magic {magic}'.format(magic=magic)) self.transport.loseConnection() return self.msgid = msgid @@ -123,7 +123,7 @@ def recvHeader(self, data): def recvClientGreeting(self, body): cver, ctype, skey = _c_greet.unpack(body[:_c_greet.size]) if ctype!=0: - _log.error("I don't understand you! {s}", s=ctype) + _log.error("I don't understand you! 
{s}".format(s=ctype)) self.transport.loseConnection() return self.version = min(self.version, cver) @@ -135,7 +135,7 @@ def recvClientGreeting(self, body): def recvPong(self, body): nonce, = _ping.unpack(body[:_ping.size]) if nonce != self.nonce: - _log.error('pong nonce does not match! {pong_nonce}!={nonce}',pong_nonce=nonce,nonce=self.nonce) + _log.error('pong nonce does not match! {pong_nonce}!={nonce}'.format(pong_nonce=nonce,nonce=self.nonce)) self.transport.loseConnection() else: _log.debug('pong nonce match') @@ -194,7 +194,7 @@ def recvDone(self, body): size_kb = self.uploadSize / 1024 rate_kbs = size_kb / elapsed_s src = "{}:{}".format(self.sess.ep.host, self.sess.ep.port) - _log.info('Done message from {src}: uploaded {size_kb}kB in {elapsed_s}s ({rate_kbs}kB/s)', src=src, size_kb=size_kb, elapsed_s=elapsed_s, rate_kbs=rate_kbs) + _log.info('Done message from {src}: uploaded {size_kb}kB in {elapsed_s}s ({rate_kbs}kB/s)'.format(src=src, size_kb=size_kb, elapsed_s=elapsed_s, rate_kbs=rate_kbs)) return self.getInitialState() @@ -238,7 +238,7 @@ class CollectionSession(object): def __init__(self, proto, endpoint): from twisted.internet import reactor - _log.info("Open session from {endpoint}",endpoint=endpoint) + _log.info("Open session from {endpoint}".format(endpoint=endpoint)) self.reactor = reactor self.proto, self.ep = proto, endpoint self.TR = Transaction(self.ep, id(self)) @@ -248,7 +248,7 @@ def __init__(self, proto, endpoint): self.dirty = False def close(self): - _log.info("Close session from {ep}", ep=self.ep) + _log.info("Close session from {ep}".format(ep=self.ep)) def suppressCancelled(err): if not err.check(defer.CancelledError): @@ -265,7 +265,7 @@ def suppressCancelled(err): self.flush() def flush(self, connected=True): - _log.info("Flush session from {s}", s=self.ep) + _log.info("Flush session from {s}".format(s=self.ep)) self.T = None if not self.dirty: return @@ -274,15 +274,15 @@ def flush(self, connected=True): self.dirty = False def 
commit(_ignored): - _log.info('Commit: {TR}', TR=TR) + _log.info('Commit: {TR}'.format(TR=TR)) return defer.maybeDeferred(self.factory.commit, TR) def abort(err): if err.check(defer.CancelledError): - _log.info('Commit cancelled: {TR}', TR=TR) + _log.info('Commit cancelled: {TR}'.format(TR=TR)) return err else: - _log.error('Commit failure: {err}', err=err) + _log.error('Commit failure: {err}'.format(err=err)) self.proto.transport.loseConnection() raise defer.CancelledError()