Reposted from 余子越's blog, from the article "Python logging module in practice"; visit yuchaoshui.com for more information.
1. Basic usage, part 1
Use the logging module directly, without creating a logger by hand.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging

log_level = "DEBUG"
log_file = "test.log"
log_level_map = {'DEBUG': 10, 'INFO': 20, 'WARNING': 30, 'ERROR': 40, 'CRITICAL': 50}

logging.basicConfig(
    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)-8s %(message)s',
    level=log_level_map[log_level],
    datefmt='%Y-%m-%d %H:%M:%S',
    filename=log_file,
    filemode='a',
)

logging.debug('debug message')
logging.info('info message')
logging.warning('warning message')
logging.error('error message')
logging.critical('critical message')
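As a side note, the hand-written log_level_map is not strictly required: the logging module already exposes the same numeric values as constants. A minimal sketch of that alternative, assuming the same log_level variable as above:

import logging

log_level = "DEBUG"

# logging defines DEBUG=10, INFO=20, WARNING=30, ERROR=40, CRITICAL=50,
# so the level name can be resolved with a simple attribute lookup:
numeric_level = getattr(logging, log_level)
assert numeric_level == logging.DEBUG  # 10

# On Python 3.2+, basicConfig also accepts the level name directly:
# logging.basicConfig(level=log_level, ...)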
2. Basic usage, part 2
Create a logger object to manage the log output.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging

logger = logging.getLogger("mainlog")
handler = logging.FileHandler("test.log")
logger.setLevel(logging.INFO)

fmt = logging.Formatter(
    '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)-8s %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(fmt)
logger.addHandler(handler)

logger.debug('debug message')
logger.info('info message')
logger.warning('warning message')
logger.error('error message')
logger.critical('critical message')
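Note that logging.getLogger returns the same object for the same name, so the logger configured above can be fetched from any other module without passing it around. A minimal sketch (the module and function names here are illustrative):

# another_module.py -- illustrative file name
import logging

# Returns the very same "mainlog" logger configured earlier;
# no handler or formatter needs to be attached again.
logger = logging.getLogger("mainlog")

def do_work():
    logger.info("work started")   # written to test.log via the FileHandler
    logger.debug("details ...")   # dropped: the logger level is INFO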
3. Logging from multiple processes and threads
- Create a module multiprocessing_logging.py with the contents below (the original post offers it as a download). It wraps every handler attached to a logger so that log records are pushed onto a multiprocessing.Queue; a background thread then drains the queue and writes through the original handler, which keeps concurrent writers from interleaving or corrupting the log file.
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals

import sys
import logging
import threading
import multiprocessing
from logging import Handler, StreamHandler, FileHandler
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler
import traceback
import time

__all__ = ['logger']

DEFAULT_LOG_FILE = "test.log"

try:
    import queue
except ImportError:
    # Python 2.
    import Queue as queue


def install_mp_handler(logger=None):
    """Wraps the handlers in the given Logger with a MultiProcessingHandler.

    :param logger: whose handlers to wrap. By default, the root logger.
    """
    if logger is None:
        logger = logging.getLogger()

    for i, orig_handler in enumerate(list(logger.handlers)):
        handler = MultiProcessingHandler(
            'mp-handler-{0}'.format(i), sub_handler=orig_handler)
        logger.removeHandler(orig_handler)
        logger.addHandler(handler)


class MultiProcessingHandler(Handler):

    def __init__(self, name, sub_handler=None):
        super(MultiProcessingHandler, self).__init__()

        if sub_handler is None:
            sub_handler = StreamHandler()

        self.sub_handler = sub_handler
        self.setLevel(self.sub_handler.level)
        self.setFormatter(self.sub_handler.formatter)
        self.queue = multiprocessing.Queue(-1)
        self._is_closed = False
        # The thread handles receiving records asynchronously.
        self._receive_thread = threading.Thread(target=self._receive, name=name)
        self._receive_thread.daemon = True
        self._receive_thread.start()

    def setFormatter(self, fmt):
        super(MultiProcessingHandler, self).setFormatter(fmt)
        self.sub_handler.setFormatter(fmt)

    def _receive(self):
        while not (self._is_closed and self.queue.empty()):
            try:
                record = self.queue.get(timeout=0.2)
                self.sub_handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except queue.Empty:
                pass  # This periodically checks if the logger is closed.
            except:
                traceback.print_exc(file=sys.stderr)

        self.queue.close()
        self.queue.join_thread()

    def _send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        # Ensure that exc_info and args have been stringified. Removes any
        # chance of unpickleable things inside and possibly reduces the
        # message size sent over the pipe.
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            self.format(record)
            record.exc_info = None
        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self._send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        if not self._is_closed:
            self._is_closed = True
            self._receive_thread.join(5.0)  # Waits for the receive queue to empty.

            self.sub_handler.close()
            super(MultiProcessingHandler, self).close()


def init_logger(logfile=DEFAULT_LOG_FILE, loglevel=logging.INFO):
    logger_name = 'AppName'
    logger = logging.getLogger(logger_name)
    logger.setLevel(loglevel)
    fmt = logging.Formatter(
        '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')

    ###########################################################################
    #
    ## Console handler
    #stream_handler = StreamHandler(sys.stderr)
    #
    ## Plain file handler
    #file_handler = FileHandler(logfile)
    #
    ## Rotate the log file by size
    #file_handler = RotatingFileHandler(logfile, mode="a", maxBytes=1024*1024, backupCount=3)
    #
    ## Rotate the log file by time; valid "when" values include: M H D midnight
    file_handler = TimedRotatingFileHandler(logfile, when="midnight", interval=1, backupCount=3)
    #
    ###########################################################################

    file_handler.setFormatter(fmt)
    #stream_handler.setFormatter(fmt)
    logger.addHandler(file_handler)
    #logger.addHandler(stream_handler)
    return logger


# This variable is the logger instance exported by the module.
logger = init_logger(logfile='ycs.log', loglevel=logging.DEBUG)
# Keep this call enabled when logging from multiple processes or threads.
install_mp_handler(logger)


def test():
    while True:
        logger.debug('This is debug message!')
        logger.info('This is info message!')
        logger.warning('This is warning message!')
        logger.error('This is error message!')
        logger.critical('This is critical message!')
        time.sleep(0.1)


def test_MP():
    from multiprocessing import Pool
    p = Pool(8)
    for _ in range(9):
        p.apply_async(test)
    p.close()
    p.join()


def test_MT():
    import threading
    for _ in range(9):
        t = threading.Thread(target=test)
        t.start()


if __name__ == '__main__':
    #test()
    #test_MP()
    #test_MT()
    pass
4. Summary
Save the code above as a module and the logger instance can be imported from anywhere in the project.
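For example, a minimal sketch of another file in the same project (the worker function and file name are illustrative, and a fork-based start method such as the Linux default is assumed, as in the test functions above):

# worker.py -- illustrative; assumes multiprocessing_logging.py is on the import path
from multiprocessing import Pool
from multiprocessing_logging import logger

def worker(n):
    # Every process writes through the shared queue-backed handler,
    # so records in ycs.log are not interleaved mid-line.
    logger.info("worker %s finished", n)
    return n

if __name__ == '__main__':
    p = Pool(4)
    p.map(worker, range(8))
    p.close()
    p.join()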