def trace(
    self,
    msg_template: str,
    /,
    *,
    _tags: Sequence[str] | None = None,
    _exc_info: ExcInfo = False,
    **attributes: Any,
) -> None:
    """Emit a log record at the 'trace' level.

    ```py
    import logfire

    logfire.configure()
    logfire.trace('This is a trace log')
    ```

    Args:
        msg_template: The message to log.
        attributes: The attributes to bind to the log.
        _tags: An optional sequence of tags to include in the log.
        _exc_info: Set to an exception or a tuple as returned by
            [`sys.exc_info()`][sys.exc_info] to record a traceback with the log message.
            Set to `True` to use the currently handled exception.
    """
    # Leading underscores are reserved for this method's own keyword options
    # (`_tags`, `_exc_info`), so reject them as attribute keys.
    for attr_key in attributes:
        if attr_key.startswith('_'):
            raise ValueError('Attribute keys cannot start with an underscore.')
    self.log('trace', msg_template, attributes, tags=_tags, exc_info=_exc_info)
def debug(
    self,
    msg_template: str,
    /,
    *,
    _tags: Sequence[str] | None = None,
    _exc_info: ExcInfo = False,
    **attributes: Any,
) -> None:
    """Emit a log record at the 'debug' level.

    ```py
    import logfire

    logfire.configure()
    logfire.debug('This is a debug log')
    ```

    Args:
        msg_template: The message to log.
        attributes: The attributes to bind to the log.
        _tags: An optional sequence of tags to include in the log.
        _exc_info: Set to an exception or a tuple as returned by
            [`sys.exc_info()`][sys.exc_info] to record a traceback with the log message.
            Set to `True` to use the currently handled exception.
    """
    # Underscore-prefixed keys are reserved for keyword options like `_tags`.
    for attr_key in attributes:
        if attr_key.startswith('_'):
            raise ValueError('Attribute keys cannot start with an underscore.')
    self.log('debug', msg_template, attributes, tags=_tags, exc_info=_exc_info)
def info(
    self,
    msg_template: str,
    /,
    *,
    _tags: Sequence[str] | None = None,
    _exc_info: ExcInfo = False,
    **attributes: Any,
) -> None:
    """Emit a log record at the 'info' level.

    ```py
    import logfire

    logfire.configure()
    logfire.info('This is an info log')
    ```

    Args:
        msg_template: The message to log.
        attributes: The attributes to bind to the log.
        _tags: An optional sequence of tags to include in the log.
        _exc_info: Set to an exception or a tuple as returned by
            [`sys.exc_info()`][sys.exc_info] to record a traceback with the log message.
            Set to `True` to use the currently handled exception.
    """
    # Underscore-prefixed keys are reserved for keyword options like `_tags`.
    for attr_key in attributes:
        if attr_key.startswith('_'):
            raise ValueError('Attribute keys cannot start with an underscore.')
    self.log('info', msg_template, attributes, tags=_tags, exc_info=_exc_info)
def notice(
    self,
    msg_template: str,
    /,
    *,
    _tags: Sequence[str] | None = None,
    _exc_info: ExcInfo = False,
    **attributes: Any,
) -> None:
    """Emit a log record at the 'notice' level.

    ```py
    import logfire

    logfire.configure()
    logfire.notice('This is a notice log')
    ```

    Args:
        msg_template: The message to log.
        attributes: The attributes to bind to the log.
        _tags: An optional sequence of tags to include in the log.
        _exc_info: Set to an exception or a tuple as returned by
            [`sys.exc_info()`][sys.exc_info] to record a traceback with the log message.
            Set to `True` to use the currently handled exception.
    """
    # Underscore-prefixed keys are reserved for keyword options like `_tags`.
    for attr_key in attributes:
        if attr_key.startswith('_'):
            raise ValueError('Attribute keys cannot start with an underscore.')
    self.log('notice', msg_template, attributes, tags=_tags, exc_info=_exc_info)
def warn(
    self,
    msg_template: str,
    /,
    *,
    _tags: Sequence[str] | None = None,
    _exc_info: ExcInfo = False,
    **attributes: Any,
) -> None:
    """Emit a log record at the 'warn' level.

    ```py
    import logfire

    logfire.configure()
    logfire.warn('This is a warning log')
    ```

    Args:
        msg_template: The message to log.
        attributes: The attributes to bind to the log.
        _tags: An optional sequence of tags to include in the log.
        _exc_info: Set to an exception or a tuple as returned by
            [`sys.exc_info()`][sys.exc_info] to record a traceback with the log message.
            Set to `True` to use the currently handled exception.
    """
    # Underscore-prefixed keys are reserved for keyword options like `_tags`.
    for attr_key in attributes:
        if attr_key.startswith('_'):
            raise ValueError('Attribute keys cannot start with an underscore.')
    self.log('warn', msg_template, attributes, tags=_tags, exc_info=_exc_info)
def error(
    self,
    msg_template: str,
    /,
    *,
    _tags: Sequence[str] | None = None,
    _exc_info: ExcInfo = False,
    **attributes: Any,
) -> None:
    """Emit a log record at the 'error' level.

    ```py
    import logfire

    logfire.configure()
    logfire.error('This is an error log')
    ```

    Args:
        msg_template: The message to log.
        attributes: The attributes to bind to the log.
        _tags: An optional sequence of tags to include in the log.
        _exc_info: Set to an exception or a tuple as returned by
            [`sys.exc_info()`][sys.exc_info] to record a traceback with the log message.
            Set to `True` to use the currently handled exception.
    """
    # Underscore-prefixed keys are reserved for keyword options like `_tags`.
    for attr_key in attributes:
        if attr_key.startswith('_'):
            raise ValueError('Attribute keys cannot start with an underscore.')
    self.log('error', msg_template, attributes, tags=_tags, exc_info=_exc_info)
def fatal(
    self,
    msg_template: str,
    /,
    *,
    _tags: Sequence[str] | None = None,
    _exc_info: ExcInfo = False,
    **attributes: Any,
) -> None:
    """Emit a log record at the 'fatal' level.

    ```py
    import logfire

    logfire.configure()
    logfire.fatal('This is a fatal log')
    ```

    Args:
        msg_template: The message to log.
        attributes: The attributes to bind to the log.
        _tags: An optional sequence of tags to include in the log.
        _exc_info: Set to an exception or a tuple as returned by
            [`sys.exc_info()`][sys.exc_info] to record a traceback with the log message.
            Set to `True` to use the currently handled exception.
    """
    # Underscore-prefixed keys are reserved for keyword options like `_tags`.
    for attr_key in attributes:
        if attr_key.startswith('_'):
            raise ValueError('Attribute keys cannot start with an underscore.')
    self.log('fatal', msg_template, attributes, tags=_tags, exc_info=_exc_info)
def exception(
    self,
    msg_template: str,
    /,
    *,
    _tags: Sequence[str] | None = None,
    _exc_info: ExcInfo = True,
    **attributes: Any,
) -> None:
    """The same as `error` but with `_exc_info=True` by default.

    This means that a traceback will be logged for any currently handled exception.

    Args:
        msg_template: The message to log.
        attributes: The attributes to bind to the log.
        _tags: An optional sequence of tags to include in the log.
        _exc_info: Set to an exception or a tuple as returned by
            [`sys.exc_info()`][sys.exc_info] to record a traceback with the log message.
    """
    # Underscore-prefixed keys are reserved for keyword options like `_tags`.
    for attr_key in attributes:  # pragma: no cover
        if attr_key.startswith('_'):
            raise ValueError('Attribute keys cannot start with an underscore.')
    # Note: records at the 'error' level; only the exc_info default differs.
    self.log('error', msg_template, attributes, tags=_tags, exc_info=_exc_info)
def span(
    self,
    msg_template: str,
    /,
    *,
    _tags: Sequence[str] | None = None,
    _span_name: str | None = None,
    _level: LevelName | None = None,
    _links: Sequence[tuple[SpanContext, otel_types.Attributes]] = (),
    **attributes: Any,
) -> LogfireSpan:
    """Context manager for creating a span.

    ```py
    import logfire

    logfire.configure()
    with logfire.span('This is a span {a=}', a='data'):
        logfire.info('new log 1')
    ```

    Args:
        msg_template: The template for the span message.
        _span_name: The span name. If not provided, the `msg_template` will be used.
        _tags: An optional sequence of tags to include in the span.
        _level: An optional log level name.
        _links: An optional sequence of links to other spans.
            Each link is a tuple of a span context and attributes.
        attributes: The arguments to include in the span and format the message template with.
            Attributes starting with an underscore are not allowed.
    """
    # Underscore-prefixed keys are reserved for keyword options like `_span_name`.
    for attr_key in attributes:
        if attr_key.startswith('_'):
            raise ValueError('Attribute keys cannot start with an underscore.')
    return self._span(
        msg_template,
        attributes,
        _tags=_tags,
        _span_name=_span_name,
        _level=_level,
        _links=_links,
    )
Set to True to prevent a warning when instrumenting a generator function.
Read https://logfire.pydantic.dev/docs/guides/advanced/generators/#using-logfireinstrument first.
def instrument(  # type: ignore[reportInconsistentOverload]
    self,
    msg_template: Callable[P, R] | LiteralString | None = None,
    *,
    span_name: str | None = None,
    extract_args: bool | Iterable[str] = True,
    allow_generator: bool = False,
) -> Callable[[Callable[P, R]], Callable[P, R]] | Callable[P, R]:
    """Decorator for instrumenting a function as a span.

    ```py
    import logfire

    logfire.configure()


    @logfire.instrument('This is a span {a=}')
    def my_function(a: int):
        logfire.info('new log {a=}', a=a)
    ```

    Args:
        msg_template: The template for the span message.
            If not provided, the module and function name will be used.
        span_name: The span name. If not provided, the `msg_template` will be used.
        extract_args: By default, all function call arguments are logged as span attributes.
            Set to `False` to disable this, or pass an iterable of argument names to include.
        allow_generator: Set to `True` to prevent a warning when instrumenting a generator function.
            Read https://logfire.pydantic.dev/docs/guides/advanced/generators/#using-logfireinstrument first.
    """
    # Normal usage: build and return the decorator via the module-level helper.
    if not callable(msg_template):
        return instrument(self, tuple(self._tags), msg_template, span_name, extract_args, allow_generator)
    # Bare usage: `@logfire.instrument` with no parentheses passes the function
    # itself as `msg_template`; re-enter with defaults and decorate immediately.
    return self.instrument()(msg_template)
def log(
    self,
    level: LevelName | int,
    msg_template: str,
    attributes: dict[str, Any] | None = None,
    tags: Sequence[str] | None = None,
    exc_info: ExcInfo = False,
    console_log: bool | None = None,
) -> None:
    """Log a message.

    ```py
    import logfire

    logfire.configure()
    logfire.log('info', 'This is a log {a}', {'a': 'Apple'})
    ```

    Args:
        level: The level of the log.
        msg_template: The message to log.
        attributes: The attributes to bind to the log.
        tags: An optional sequence of tags to include in the log.
        exc_info: Set to an exception or a tuple as returned by
            [`sys.exc_info()`][sys.exc_info] to record a traceback with the log message.
            Set to `True` to use the currently handled exception.
        console_log: Whether to log to the console, defaults to `True`.
    """
    with handle_internal_errors():
        stack_info = get_user_stack_info()

        attributes = attributes or {}
        merged_attributes = {**stack_info, **attributes}
        if (msg := attributes.pop(ATTRIBUTES_MESSAGE_KEY, None)) is None:
            # No pre-formatted message: format msg_template ourselves, optionally
            # inspecting the caller's frame to extract f-string-style arguments.
            fstring_frame = None
            if self._config.inspect_arguments:
                fstring_frame = inspect.currentframe()
                if fstring_frame.f_back.f_code.co_filename == Logfire.log.__code__.co_filename:  # type: ignore
                    # fstring_frame.f_back should be the user's frame.
                    # The user called logfire.info or a similar method rather than calling logfire.log directly.
                    fstring_frame = fstring_frame.f_back  # type: ignore

            msg, extra_attrs, msg_template = logfire_format_with_magic(
                msg_template,
                merged_attributes,
                self._config.scrubber,
                fstring_frame=fstring_frame,
            )
            if extra_attrs:
                merged_attributes.update(extra_attrs)
                # Only do this if extra_attrs is not empty since the copy of `attributes` might be expensive.
                # We update both because attributes_json_schema_properties looks at `attributes`.
                attributes = {**attributes, **extra_attrs}
        else:
            # The message has already been filled in, presumably by a logging integration.
            # Make sure it's a string.
            msg = merged_attributes[ATTRIBUTES_MESSAGE_KEY] = str(msg)
            msg_template = str(msg_template)

        otlp_attributes = prepare_otlp_attributes(merged_attributes)

        # Fixed keys first so user attributes from prepare_otlp_attributes win on conflicts.
        otlp_attributes = {
            ATTRIBUTES_SPAN_TYPE_KEY: 'log',
            **log_level_attributes(level),
            ATTRIBUTES_MESSAGE_TEMPLATE_KEY: msg_template,
            ATTRIBUTES_MESSAGE_KEY: msg,
            **otlp_attributes,
        }

        if json_schema_properties := attributes_json_schema_properties(attributes):
            otlp_attributes[ATTRIBUTES_JSON_SCHEMA_KEY] = attributes_json_schema(json_schema_properties)

        # Instance-level tags come first, then per-call tags; duplicates are removed.
        tags = self._tags + tuple(tags or ())
        if tags:
            otlp_attributes[ATTRIBUTES_TAGS_KEY] = uniquify_sequence(tags)

        # Instance sample rate takes precedence over a per-call attribute.
        sample_rate = (
            self._sample_rate
            if self._sample_rate is not None
            else otlp_attributes.pop(ATTRIBUTES_SAMPLE_RATE_KEY, None)
        )
        if sample_rate is not None and sample_rate != 1:  # pragma: no cover
            otlp_attributes[ATTRIBUTES_SAMPLE_RATE_KEY] = sample_rate

        if not (self._console_log if console_log is None else console_log):
            otlp_attributes[DISABLE_CONSOLE_KEY] = True
        start_time = self._config.advanced.ns_timestamp_generator()

        span = self._logs_tracer.start_span(
            msg_template,
            attributes=otlp_attributes,
            start_time=start_time,
        )
        if exc_info:
            if exc_info is True:
                exc_info = sys.exc_info()
            if isinstance(exc_info, tuple):
                # Keep only the exception instance from an exc_info triple.
                exc_info = exc_info[1]
            if isinstance(exc_info, BaseException):
                record_exception(span, exc_info)
                if otlp_attributes[ATTRIBUTES_LOG_LEVEL_NUM_KEY] >= LEVEL_NUMBERS['error']:  # type: ignore
                    # Set the status description to the exception message.
                    # OTEL only lets us set the description when the status code is ERROR,
                    # which we only want to do when the log level is error.
                    set_exception_status(span, exc_info)
            elif exc_info is not None:  # pragma: no cover
                raise TypeError(f'Invalid type for exc_info: {exc_info.__class__.__name__}')

        # A log is a zero-duration span: end at the same timestamp it started.
        span.end(start_time)
A new Logfire instance which always uses the given tags.
importlogfirelogfire.configure()local_logfire=logfire.with_tags('tag1')local_logfire.info('a log message',_tags=['tag2'])# This is equivalent to:logfire.info('a log message',_tags=['tag1','tag2'])
def with_tags(self, *tags: str) -> Logfire:
    """A new Logfire instance which always uses the given tags.

    ```py
    import logfire

    logfire.configure()
    local_logfire = logfire.with_tags('tag1')
    local_logfire.info('a log message', _tags=['tag2'])

    # This is equivalent to:
    logfire.info('a log message', _tags=['tag1', 'tag2'])
    ```

    Args:
        tags: The tags to add.

    Returns:
        A new Logfire instance with the `tags` added to any existing tags.
    """
    # Thin convenience wrapper; with_settings does the actual merging.
    return self.with_settings(tags=tags)
The stack level offset to use when collecting stack info, also affects the warning which
message formatting might emit, defaults to 0 which means the stack info will be collected from the
position where logfire.log was called.
def with_settings(
    self,
    *,
    tags: Sequence[str] = (),
    stack_offset: int | None = None,
    console_log: bool | None = None,
    custom_scope_suffix: str | None = None,
) -> Logfire:
    """A new Logfire instance which uses the given settings.

    Args:
        tags: Sequence of tags to include in the log.
        stack_offset: The stack level offset to use when collecting stack info, also affects the warning which
            message formatting might emit, defaults to `0` which means the stack info will be collected from the
            position where [`logfire.log`][logfire.Logfire.log] was called.
        console_log: Whether to log to the console, defaults to `True`.
        custom_scope_suffix: A custom suffix to append to `logfire.` e.g. `logfire.loguru`.

            It should only be used when instrumenting another library with Logfire, such as structlog or loguru.

            See the `instrumenting_module_name` parameter on
            [TracerProvider.get_tracer][opentelemetry.sdk.trace.TracerProvider.get_tracer] for more info.

    Returns:
        A new Logfire instance with the given settings applied.
    """
    # TODO add sample_rate once it's more stable
    # NOTE(review): `stack_offset` is accepted but not forwarded below — confirm intended.
    effective_console_log = self._console_log if console_log is None else console_log
    if custom_scope_suffix is None:
        scope = self._otel_scope
    else:
        scope = f'logfire.{custom_scope_suffix}'
    return Logfire(
        config=self._config,
        tags=self._tags + tuple(tags),
        sample_rate=self._sample_rate,
        console_log=effective_console_log,
        otel_scope=scope,
    )
def force_flush(self, timeout_millis: int = 3_000) -> bool:  # pragma: no cover
    """Force flush all spans and metrics.

    Args:
        timeout_millis: The timeout in milliseconds.

    Returns:
        Whether the flush of spans was successful.
    """
    flushed = self._config.force_flush(timeout_millis)
    return flushed
A context manager that will revert the patch when exited.
This context manager doesn't take into account threads or other concurrency.
Calling this method will immediately apply the patch
without waiting for the context manager to be opened,
i.e. it's not necessary to use this as a context manager.
Source code in logfire/_internal/main.py
830831832833834835836837838839840841842843844845
def log_slow_async_callbacks(self, slow_duration: float = 0.1) -> ContextManager[None]:
    """Log a warning whenever a function running in the asyncio event loop blocks for too long.

    This works by patching the `asyncio.events.Handle._run` method.

    Args:
        slow_duration: the threshold in seconds for when a callback is considered slow.

    Returns:
        A context manager that will revert the patch when exited.
            This context manager doesn't take into account threads or other concurrency.
            Calling this method will immediately apply the patch
            without waiting for the context manager to be opened,
            i.e. it's not necessary to use this as a context manager.
    """
    # The patch takes effect immediately; the returned context manager only reverts it.
    unpatcher = async_.log_slow_callbacks(self, slow_duration)
    return unpatcher
This will trace all non-generator function calls in the modules specified by the modules argument.
It's equivalent to wrapping the body of every function in matching modules in with logfire.span(...):.
Note
This function MUST be called before any of the modules to be traced are imported.
Generator functions will not be traced for reasons explained here.
This works by inserting a new meta path finder into sys.meta_path, so inserting another finder before it
may prevent it from working.
It relies on being able to retrieve the source code via at least one other existing finder in the meta path,
so it may not work if standard finders are not present or if the source code is not available.
A modified version of the source code is then compiled and executed in place of the original module.
List of module names to trace, or a function which returns True for modules that should be traced.
If a list is provided, any submodules within a given module will also be traced.
A minimum duration in seconds for which a function must run before it's traced.
Setting to 0 causes all functions to be traced from the beginning.
Otherwise, the first time(s) each function is called, it will be timed but not traced.
Only after the function has run for at least min_duration will it be traced in subsequent calls.
If this is 'error' (the default), then an exception will be raised if any of the
modules in sys.modules (i.e. modules that have already been imported) match the modules to trace.
Set to 'warn' to issue a warning instead, or 'ignore' to skip the check.
def install_auto_tracing(
    self,
    modules: Sequence[str] | Callable[[AutoTraceModule], bool],
    *,
    min_duration: float,
    check_imported_modules: Literal['error', 'warn', 'ignore'] = 'error',
) -> None:
    """Install automatic tracing.

    See the [Auto-Tracing guide](https://logfire.pydantic.dev/docs/guides/onboarding_checklist/add_auto_tracing/)
    for more info.

    This will trace all non-generator function calls in the modules specified by the modules argument.
    It's equivalent to wrapping the body of every function in matching modules in `with logfire.span(...):`.

    !!! note
        This function MUST be called before any of the modules to be traced are imported.

        Generator functions will not be traced for reasons explained
        [here](https://logfire.pydantic.dev/docs/guides/advanced/generators/).

    This works by inserting a new meta path finder into `sys.meta_path`, so inserting another finder before it
    may prevent it from working.

    It relies on being able to retrieve the source code via at least one other existing finder in the meta path,
    so it may not work if standard finders are not present or if the source code is not available.

    A modified version of the source code is then compiled and executed in place of the original module.

    Args:
        modules: List of module names to trace, or a function which returns True for modules that should be traced.
            If a list is provided, any submodules within a given module will also be traced.
        min_duration: A minimum duration in seconds for which a function must run before it's traced.
            Setting to `0` causes all functions to be traced from the beginning.
            Otherwise, the first time(s) each function is called, it will be timed but not traced.
            Only after the function has run for at least `min_duration` will it be traced in subsequent calls.
        check_imported_modules: If this is `'error'` (the default), then an exception will be raised if any of the
            modules in `sys.modules` (i.e. modules that have already been imported) match the modules to trace.
            Set to `'warn'` to issue a warning instead, or `'ignore'` to skip the check.
    """
    # Delegates to the module-level implementation of the same name.
    install_auto_tracing(self, modules, min_duration=min_duration, check_imported_modules=check_imported_modules)
def instrument_pydantic(
    self,
    record: PydanticPluginRecordValues = 'all',
    include: Iterable[str] = (),
    exclude: Iterable[str] = (),
) -> None:
    """Instrument Pydantic model validations.

    This must be called before defining and importing the model classes you want to instrument.
    See the [Pydantic integration guide](https://logfire.pydantic.dev/docs/integrations/pydantic/) for more info.

    Args:
        record: The record mode for the Pydantic plugin. It can be one of the following values:

            - `all`: Send traces and metrics for all events. This is default value.
            - `failure`: Send metrics for all validations and traces only for validation failures.
            - `metrics`: Send only metrics.
            - `off`: Disable instrumentation.
        include: By default, third party modules are not instrumented. This option allows you
            to include specific modules.
        exclude: Exclude specific modules from instrumentation.
    """
    # Note that unlike most instrument_* methods, we intentionally don't call
    # _warn_if_not_initialized_for_instrumentation, because this method needs to be called early.
    if record != 'off':
        import pydantic

        if get_version(pydantic.__version__) < get_version('2.5.0'):
            raise RuntimeError('The Pydantic plugin requires Pydantic 2.5.0 or newer.')

    # Imported unconditionally: even `record='off'` sets the plugin config below
    # (to explicitly disable the instrumentation).
    from logfire.integrations.pydantic import PydanticPlugin, set_pydantic_plugin_config

    # Allow a single module name to be passed as a bare string.
    if isinstance(include, str):
        include = {include}
    if isinstance(exclude, str):
        exclude = {exclude}

    # TODO instrument using this instance, i.e. pass `self` somewhere, rather than always using the global instance
    set_pydantic_plugin_config(
        PydanticPlugin(
            record=record,
            include=set(include),
            exclude=set(exclude),
        )
    )
A function that takes a Request or WebSocket
and a dictionary of attributes and returns a new dictionary of attributes.
The input dictionary will contain:
values: A dictionary mapping argument names of the endpoint function to parsed and validated values.
errors: A list of validation errors for any invalid inputs.
The returned dictionary will be used as the attributes for a log message.
If None is returned, no log message will be created.
You can use this to e.g. only log validation errors, or nothing at all.
You can also add custom attributes.
The default implementation will return the input dictionary unchanged.
The function mustn't modify the contents of values or errors.
A string of comma-separated regexes which will exclude a request from tracing if the full URL
matches any of the regexes. This applies to both the Logfire and OpenTelemetry instrumentation.
If not provided, the environment variables
OTEL_PYTHON_FASTAPI_EXCLUDED_URLS and OTEL_PYTHON_EXCLUDED_URLS will be checked.
Set to True to allow the OpenTelemetry ASGI middleware to create send/receive spans.
These are disabled by default to reduce overhead and the number of spans created,
since many can be created for a single request, and they are not often useful.
If enabled, they will be set to debug level, meaning they will usually still be hidden in the UI.
A context manager that will revert the instrumentation when exited.
This context manager doesn't take into account threads or other concurrency.
Calling this method will immediately apply the instrumentation
without waiting for the context manager to be opened,
i.e. it's not necessary to use this as a context manager.
def instrument_fastapi(
    self,
    app: FastAPI,
    *,
    capture_headers: bool = False,
    request_attributes_mapper: Callable[
        [
            Request | WebSocket,
            dict[str, Any],
        ],
        dict[str, Any] | None,
    ]
    | None = None,
    excluded_urls: str | Iterable[str] | None = None,
    record_send_receive: bool = False,
    **opentelemetry_kwargs: Any,
) -> ContextManager[None]:
    """Instrument a FastAPI app so that spans and logs are automatically created for each request.

    Uses the [OpenTelemetry FastAPI Instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)
    under the hood, with some additional features.

    Args:
        app: The FastAPI app to instrument.
        capture_headers: Set to `True` to capture all request and response headers.
        request_attributes_mapper: A function that takes a [`Request`][fastapi.Request] or [`WebSocket`][fastapi.WebSocket]
            and a dictionary of attributes and returns a new dictionary of attributes.
            The input dictionary will contain:

            - `values`: A dictionary mapping argument names of the endpoint function to parsed and validated values.
            - `errors`: A list of validation errors for any invalid inputs.

            The returned dictionary will be used as the attributes for a log message.
            If `None` is returned, no log message will be created.

            You can use this to e.g. only log validation errors, or nothing at all.
            You can also add custom attributes.

            The default implementation will return the input dictionary unchanged.
            The function mustn't modify the contents of `values` or `errors`.
        excluded_urls: A string of comma-separated regexes which will exclude a request from tracing if the full URL
            matches any of the regexes. This applies to both the Logfire and OpenTelemetry instrumentation.
            If not provided, the environment variables
            `OTEL_PYTHON_FASTAPI_EXCLUDED_URLS` and `OTEL_PYTHON_EXCLUDED_URLS` will be checked.
        record_send_receive: Set to `True` to allow the OpenTelemetry ASGI middleware to create send/receive spans.
            These are disabled by default to reduce overhead and the number of spans created,
            since many can be created for a single request, and they are not often useful.
            If enabled, they will be set to debug level, meaning they will usually still be hidden in the UI.
        opentelemetry_kwargs: Additional keyword arguments to pass to the OpenTelemetry FastAPI instrumentation.

    Returns:
        A context manager that will revert the instrumentation when exited.
            This context manager doesn't take into account threads or other concurrency.

            Calling this method will immediately apply the instrumentation
            without waiting for the context manager to be opened,
            i.e. it's not necessary to use this as a context manager.
    """
    from .integrations.fastapi import instrument_fastapi

    self._warn_if_not_initialized_for_instrumentation()
    # Forward everything to the integration module; `self` carries the config.
    forwarded = dict(
        capture_headers=capture_headers,
        request_attributes_mapper=request_attributes_mapper,
        excluded_urls=excluded_urls,
        record_send_receive=record_send_receive,
    )
    return instrument_fastapi(self, app, **forwarded, **opentelemetry_kwargs)
When stream=True a second span is created to instrument the streamed response.
Example usage:
importlogfireimportopenaiclient=openai.OpenAI()logfire.configure()logfire.instrument_openai(client)response=client.chat.completions.create(model='gpt-4',messages=[{'role':'system','content':'You are a helpful assistant.'},{'role':'user','content':'What is four plus five?'},],)print('answer:',response.choices[0].message.content)
If True, suppress any other OTEL instrumentation that may be otherwise
enabled. In reality, this means the HTTPX instrumentation, which could otherwise be called since
OpenAI uses HTTPX to make HTTP requests.
def instrument_openai(
    self,
    openai_client: openai.OpenAI
    | openai.AsyncOpenAI
    | type[openai.OpenAI]
    | type[openai.AsyncOpenAI]
    | None = None,
    *,
    suppress_other_instrumentation: bool = True,
) -> ContextManager[None]:
    """Instrument an OpenAI client so that spans are automatically created for each request.

    The following methods are instrumented for both the sync and the async clients:

    - [`client.chat.completions.create`](https://platform.openai.com/docs/guides/text-generation/chat-completions-api) — with and without `stream=True`
    - [`client.completions.create`](https://platform.openai.com/docs/guides/text-generation/completions-api) — with and without `stream=True`
    - [`client.embeddings.create`](https://platform.openai.com/docs/guides/embeddings/how-to-get-embeddings)
    - [`client.images.generate`](https://platform.openai.com/docs/guides/images/generations)

    When `stream=True` a second span is created to instrument the streamed response.

    Example usage:

    ```python
    import logfire
    import openai

    client = openai.OpenAI()
    logfire.configure()
    logfire.instrument_openai(client)

    response = client.chat.completions.create(
        model='gpt-4',
        messages=[
            {'role': 'system', 'content': 'You are a helpful assistant.'},
            {'role': 'user', 'content': 'What is four plus five?'},
        ],
    )
    print('answer:', response.choices[0].message.content)
    ```

    Args:
        openai_client: The OpenAI client or class to instrument:

            - `None` (the default) to instrument both the `openai.OpenAI` and `openai.AsyncOpenAI` classes.
            - The `openai.OpenAI` class or a subclass
            - The `openai.AsyncOpenAI` class or a subclass
            - An instance of `openai.OpenAI`
            - An instance of `openai.AsyncOpenAI`
        suppress_other_instrumentation: If True, suppress any other OTEL instrumentation that may be otherwise
            enabled. In reality, this means the HTTPX instrumentation, which could otherwise be called since
            OpenAI uses HTTPX to make HTTP requests.

    Returns:
        A context manager that will revert the instrumentation when exited.
            Use of this context manager is optional.
    """
    import openai

    from .integrations.llm_providers.llm_provider import instrument_llm_provider
    from .integrations.llm_providers.openai import get_endpoint_config, is_async_client, on_response

    self._warn_if_not_initialized_for_instrumentation()
    # `None` means: instrument both client classes globally.
    target = openai_client or (openai.OpenAI, openai.AsyncOpenAI)
    return instrument_llm_provider(
        self,
        target,
        suppress_other_instrumentation,
        'OpenAI',
        get_endpoint_config,
        on_response,
        is_async_client,
    )
When stream=True a second span is created to instrument the streamed response.
Example usage:
importlogfireimportanthropicclient=anthropic.Anthropic()logfire.configure()logfire.instrument_anthropic(client)response=client.messages.create(model='claude-3-haiku-20240307',system='You are a helpful assistant.',messages=[{'role':'user','content':'What is four plus five?'},],)print('answer:',response.content[0].text)
The Anthropic client or class to instrument:
- None (the default) to instrument all Anthropic client types
- The anthropic.Anthropic or anthropic.AnthropicBedrock class or subclass
- The anthropic.AsyncAnthropic or anthropic.AsyncAnthropicBedrock class or subclass
- An instance of any of the above classes
If True, suppress any other OTEL instrumentation that may be otherwise
enabled. In reality, this means the HTTPX instrumentation, which could otherwise be called since
Anthropic uses HTTPX to make HTTP requests.
def instrument_anthropic(
    self,
    anthropic_client: (
        anthropic.Anthropic
        | anthropic.AsyncAnthropic
        | anthropic.AnthropicBedrock
        | anthropic.AsyncAnthropicBedrock
        | type[anthropic.Anthropic]
        | type[anthropic.AsyncAnthropic]
        | type[anthropic.AnthropicBedrock]
        | type[anthropic.AsyncAnthropicBedrock]
        | None
    ) = None,
    *,
    suppress_other_instrumentation: bool = True,
) -> ContextManager[None]:
    """Instrument an Anthropic client so that spans are automatically created for each request.

    The following methods are instrumented for both the sync and async clients:

    - [`client.messages.create`](https://docs.anthropic.com/en/api/messages)
    - [`client.messages.stream`](https://docs.anthropic.com/en/api/messages-streaming)
    - [`client.beta.tools.messages.create`](https://docs.anthropic.com/en/docs/tool-use)

    When `stream=True` a second span is created to instrument the streamed response.

    Example usage:

    ```python
    import logfire
    import anthropic

    client = anthropic.Anthropic()
    logfire.configure()
    logfire.instrument_anthropic(client)

    response = client.messages.create(
        model='claude-3-haiku-20240307',
        system='You are a helpful assistant.',
        messages=[
            {'role': 'user', 'content': 'What is four plus five?'},
        ],
    )
    print('answer:', response.content[0].text)
    ```

    Args:
        anthropic_client: The Anthropic client or class to instrument:

            - `None` (the default) to instrument all Anthropic client types
            - The `anthropic.Anthropic` or `anthropic.AnthropicBedrock` class or subclass
            - The `anthropic.AsyncAnthropic` or `anthropic.AsyncAnthropicBedrock` class or subclass
            - An instance of any of the above classes
        suppress_other_instrumentation: If True, suppress any other OTEL instrumentation that may be otherwise
            enabled. In reality, this means the HTTPX instrumentation, which could otherwise be called since
            Anthropic uses HTTPX to make HTTP requests.

    Returns:
        A context manager that will revert the instrumentation when exited.
            Use of this context manager is optional.
    """
    import anthropic

    from .integrations.llm_providers.anthropic import get_endpoint_config, is_async_client, on_response
    from .integrations.llm_providers.llm_provider import instrument_llm_provider

    self._warn_if_not_initialized_for_instrumentation()
    return instrument_llm_provider(
        self,
        # `None` means: instrument all four Anthropic client classes globally.
        anthropic_client
        or (
            anthropic.Anthropic,
            anthropic.AsyncAnthropic,
            anthropic.AnthropicBedrock,
            anthropic.AsyncAnthropicBedrock,
        ),
        suppress_other_instrumentation,
        'Anthropic',
        get_endpoint_config,
        on_response,
        is_async_client,
    )
Instrument the asyncpg module so that spans are automatically created for each query.
Source code in logfire/_internal/main.py
116811691170117111721173117411751176117711781179
def instrument_asyncpg(self, **kwargs: Any) -> None:
    """Instrument the `asyncpg` module so that spans are automatically created for each query.

    Args:
        kwargs: Additional keyword arguments forwarded to the underlying
            OpenTelemetry asyncpg instrumentation; may override the default
            tracer/meter providers.
    """
    from .integrations.asyncpg import instrument_asyncpg

    self._warn_if_not_initialized_for_instrumentation()
    # Supply this instance's providers as defaults; caller kwargs win on conflict.
    merged_kwargs: dict[str, Any] = {
        'tracer_provider': self._config.get_tracer_provider(),
        'meter_provider': self._config.get_meter_provider(),
        **kwargs,
    }
    return instrument_asyncpg(**merged_kwargs)
def instrument_httpx(
    self,
    client: httpx.Client | httpx.AsyncClient | None = None,
    *,
    capture_headers: bool = False,
    capture_request_body: bool = False,
    capture_response_body: bool = False,
    request_hook: HttpxRequestHook | HttpxAsyncRequestHook | None = None,
    response_hook: HttpxResponseHook | HttpxAsyncResponseHook | None = None,
    async_request_hook: HttpxAsyncRequestHook | None = None,
    async_response_hook: HttpxAsyncResponseHook | None = None,
    **kwargs: Any,
) -> None:
    """Instrument the `httpx` module so that spans are automatically created for each request.

    Optionally, pass an `httpx.Client` instance to instrument only that client.

    Uses the
    [OpenTelemetry HTTPX Instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/httpx/httpx.html)
    library, specifically `HTTPXClientInstrumentor().instrument()`, to which it passes `**kwargs`.

    Args:
        client: The `httpx.Client` or `httpx.AsyncClient` instance to instrument.
            If `None`, the default, all clients will be instrumented.
        capture_headers: Set to `True` to capture all HTTP headers.
            To customize which headers are captured, see the
            [Capture Headers](https://logfire.pydantic.dev/docs/guides/advanced/capture_headers/) section.
        capture_request_body: Set to `True` to capture the request body.
        capture_response_body: Set to `True` to capture the response body.
        request_hook: A function called right after a span is created for a request.
        response_hook: A function called right before a span is finished for the response.
        async_request_hook: A function called right after a span is created for an async request.
        async_response_hook: A function called right before a span is finished for an async response.
        **kwargs: Additional keyword arguments to pass to the OpenTelemetry `instrument` method,
            for future compatibility.
    """
    from .integrations.httpx import instrument_httpx

    self._warn_if_not_initialized_for_instrumentation()
    return instrument_httpx(
        self,
        client,
        capture_headers=capture_headers,
        capture_request_body=capture_request_body,
        capture_response_body=capture_response_body,
        request_hook=request_hook,
        response_hook=response_hook,
        async_request_hook=async_request_hook,
        async_response_hook=async_response_hook,
        **kwargs,
    )
definstrument_celery(self,**kwargs:Any)->None:"""Instrument `celery` so that spans are automatically created for each task. Uses the [OpenTelemetry Celery Instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/celery/celery.html) library. Args: **kwargs: Additional keyword arguments to pass to the OpenTelemetry `instrument` method, for future compatibility. """from.integrations.celeryimportinstrument_celeryself._warn_if_not_initialized_for_instrumentation()returninstrument_celery(**{'tracer_provider':self._config.get_tracer_provider(),'meter_provider':self._config.get_meter_provider(),**kwargs,},)
Adds comments to SQL queries performed by Django,
so that database logs have additional context.
This does NOT create spans/logs for the queries themselves.
For that you need to instrument the database driver, e.g. with logfire.instrument_psycopg().
To configure the SQL Commentor, see the OpenTelemetry documentation for the
values that need to be added to settings.py.
A function called right before a span is finished for the response.
The function should accept three arguments:
the span, the Django Request object, and the Django Response object.
def instrument_django(
    self,
    capture_headers: bool = False,
    is_sql_commentor_enabled: bool | None = None,
    request_hook: Callable[[trace_api.Span, HttpRequest], None] | None = None,
    response_hook: Callable[[trace_api.Span, HttpRequest, HttpResponse], None] | None = None,
    excluded_urls: str | None = None,
    **kwargs: Any,
) -> None:
    """Instrument `django` so that spans are automatically created for each web request.

    Uses the
    [OpenTelemetry Django Instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/django/django.html)
    library.

    Args:
        capture_headers: Set to `True` to capture all request and response headers.
        is_sql_commentor_enabled: Adds comments to SQL queries performed by Django,
            so that database logs have additional context.

            This does NOT create spans/logs for the queries themselves.
            For that you need to instrument the database driver, e.g. with `logfire.instrument_psycopg()`.

            To configure the SQL Commentor, see the OpenTelemetry documentation for the
            values that need to be added to `settings.py`.
        request_hook: A function called right after a span is created for a request.
            The function should accept two arguments: the span and the Django `Request` object.
        response_hook: A function called right before a span is finished for the response.
            The function should accept three arguments:
            the span, the Django `Request` object, and the Django `Response` object.
        excluded_urls: A string containing a comma-delimited list of regexes used to exclude URLs from tracking.
        **kwargs: Additional keyword arguments to pass to the OpenTelemetry `instrument` method,
            for future compatibility.
    """
    from .integrations.django import instrument_django

    self._warn_if_not_initialized_for_instrumentation()
    # Defaults first so that explicit kwargs from the caller win.
    otel_kwargs: dict[str, Any] = {
        'tracer_provider': self._config.get_tracer_provider(),
        'meter_provider': self._config.get_meter_provider(),
    }
    otel_kwargs.update(kwargs)
    return instrument_django(
        capture_headers=capture_headers,
        is_sql_commentor_enabled=is_sql_commentor_enabled,
        request_hook=request_hook,
        response_hook=response_hook,
        excluded_urls=excluded_urls,
        **otel_kwargs,
    )
definstrument_requests(self,excluded_urls:str|None=None,request_hook:Callable[[Span,requests.PreparedRequest],None]|None=None,response_hook:Callable[[Span,requests.PreparedRequest,requests.Response],None]|None=None,**kwargs:Any,)->None:"""Instrument the `requests` module so that spans are automatically created for each request. Args: excluded_urls: A string containing a comma-delimited list of regexes used to exclude URLs from tracking request_hook: A function called right after a span is created for a request. response_hook: A function called right before a span is finished for the response. **kwargs: Additional keyword arguments to pass to the OpenTelemetry `instrument` methods, for future compatibility. """from.integrations.requestsimportinstrument_requestsself._warn_if_not_initialized_for_instrumentation()returninstrument_requests(excluded_urls=excluded_urls,request_hook=request_hook,response_hook=response_hook,**{'tracer_provider':self._config.get_tracer_provider(),'meter_provider':self._config.get_meter_provider(),**kwargs,},)
definstrument_flask(self,app:Flask,*,capture_headers:bool=False,enable_commenter:bool=True,commenter_options:FlaskCommenterOptions|None=None,exclude_urls:str|None=None,request_hook:FlaskRequestHook|None=None,response_hook:FlaskResponseHook|None=None,**kwargs:Any,)->None:"""Instrument `app` so that spans are automatically created for each request. Uses the [OpenTelemetry Flask Instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/flask/flask.html) library, specifically `FlaskInstrumentor().instrument_app()`, to which it passes `**kwargs`. Args: app: The Flask app to instrument. capture_headers: Set to `True` to capture all request and response headers. enable_commenter: Adds comments to SQL queries performed by Flask, so that database logs have additional context. commenter_options: Configure the tags to be added to the SQL comments. See more about it on the [SQLCommenter Configurations](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/flask/flask.html#sqlcommenter-configurations). exclude_urls: A string containing a comma-delimited list of regexes used to exclude URLs from tracking. request_hook: A function called right after a span is created for a request. response_hook: A function called right before a span is finished for the response. **kwargs: Additional keyword arguments to pass to the OpenTelemetry Flask instrumentation. """from.integrations.flaskimportinstrument_flaskself._warn_if_not_initialized_for_instrumentation()returninstrument_flask(app,capture_headers=capture_headers,enable_commenter=enable_commenter,commenter_options=commenter_options,exclude_urls=exclude_urls,request_hook=request_hook,response_hook=response_hook,**{'tracer_provider':self._config.get_tracer_provider(),'meter_provider':self._config.get_meter_provider(),**kwargs,},)
Set to True to allow the OpenTelemetry ASGI middleware to create send/receive spans.
These are disabled by default to reduce overhead and the number of spans created,
since many can be created for a single request, and they are not often useful.
If enabled, they will be set to debug level, meaning they will usually still be hidden in the UI.
definstrument_starlette(self,app:Starlette,*,capture_headers:bool=False,record_send_receive:bool=False,server_request_hook:ServerRequestHook|None=None,client_request_hook:ClientRequestHook|None=None,client_response_hook:ClientResponseHook|None=None,**kwargs:Any,)->None:"""Instrument `app` so that spans are automatically created for each request. Uses the [OpenTelemetry Starlette Instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/starlette/starlette.html) library, specifically `StarletteInstrumentor.instrument_app()`, to which it passes `**kwargs`. Args: app: The Starlette app to instrument. capture_headers: Set to `True` to capture all request and response headers. record_send_receive: Set to `True` to allow the OpenTelemetry ASGI middleware to create send/receive spans. These are disabled by default to reduce overhead and the number of spans created, since many can be created for a single request, and they are not often useful. If enabled, they will be set to debug level, meaning they will usually still be hidden in the UI. server_request_hook: A function that receives a server span and the ASGI scope for every incoming request. client_request_hook: A function that receives a span, the ASGI scope and the receive ASGI message for every ASGI receive event. client_response_hook: A function that receives a span, the ASGI scope and the send ASGI message for every ASGI send event. **kwargs: Additional keyword arguments to pass to the OpenTelemetry Starlette instrumentation. """from.integrations.starletteimportinstrument_starletteself._warn_if_not_initialized_for_instrumentation()returninstrument_starlette(self,app,record_send_receive=record_send_receive,capture_headers=capture_headers,server_request_hook=server_request_hook,client_request_hook=client_request_hook,client_response_hook=client_response_hook,**kwargs,)
Set to True to allow the OpenTelemetry ASGI middleware to create send/receive spans.
These are disabled by default to reduce overhead and the number of spans created,
since many can be created for a single request, and they are not often useful.
If enabled, they will be set to debug level, meaning they will usually still be hidden in the UI.
definstrument_asgi(self,app:ASGIApp,capture_headers:bool=False,record_send_receive:bool=False,**kwargs:Unpack[ASGIInstrumentKwargs],)->ASGIApp:"""Instrument `app` so that spans are automatically created for each request. Uses the ASGI [`OpenTelemetryMiddleware`][opentelemetry.instrumentation.asgi.OpenTelemetryMiddleware] under the hood, to which it passes `**kwargs`. Warning: Instead of modifying the app in place, this method returns the instrumented ASGI application. Args: app: The ASGI application to instrument. capture_headers: Set to `True` to capture all request and response headers. record_send_receive: Set to `True` to allow the OpenTelemetry ASGI middleware to create send/receive spans. These are disabled by default to reduce overhead and the number of spans created, since many can be created for a single request, and they are not often useful. If enabled, they will be set to debug level, meaning they will usually still be hidden in the UI. **kwargs: Additional keyword arguments to pass to the OpenTelemetry ASGI middleware. Returns: The instrumented ASGI application. """from.integrations.asgiimportinstrument_asgiself._warn_if_not_initialized_for_instrumentation()returninstrument_asgi(self,app,record_send_receive=record_send_receive,capture_headers=capture_headers,**kwargs,)
definstrument_wsgi(self,app:WSGIApplication,capture_headers:bool=False,request_hook:WSGIRequestHook|None=None,response_hook:WSGIResponseHook|None=None,**kwargs:Any,)->WSGIApplication:"""Instrument `app` so that spans are automatically created for each request. Uses the WSGI [`OpenTelemetryMiddleware`][opentelemetry.instrumentation.wsgi.OpenTelemetryMiddleware] under the hood, to which it passes `**kwargs`. Warning: Instead of modifying the app in place, this method returns the instrumented WSGI application. Args: app: The WSGI application to instrument. capture_headers: Set to `True` to capture all request and response headers. request_hook: A function called right after a span is created for a request. response_hook: A function called right before a span is finished for the response. **kwargs: Additional keyword arguments to pass to the OpenTelemetry WSGI middleware. Returns: The instrumented WSGI application. """from.integrations.wsgiimportinstrument_wsgiself._warn_if_not_initialized_for_instrumentation()returninstrument_wsgi(app,capture_headers=capture_headers,request_hook=request_hook,response_hook=response_hook,**{'tracer_provider':self._config.get_tracer_provider(),'meter_provider':self._config.get_meter_provider(),**kwargs,},)
definstrument_aiohttp_client(self,**kwargs:Any)->None:"""Instrument the `aiohttp` module so that spans are automatically created for each client request. Uses the [OpenTelemetry aiohttp client Instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/aiohttp_client/aiohttp_client.html) library, specifically `AioHttpClientInstrumentor().instrument()`, to which it passes `**kwargs`. """from.integrations.aiohttp_clientimportinstrument_aiohttp_clientself._warn_if_not_initialized_for_instrumentation()returninstrument_aiohttp_client(self,**kwargs)
definstrument_sqlalchemy(self,engine:AsyncEngine|Engine|None=None,enable_commenter:bool=False,commenter_options:SQLAlchemyCommenterOptions|None=None,**kwargs:Any,)->None:"""Instrument the `sqlalchemy` module so that spans are automatically created for each query. Uses the [OpenTelemetry SQLAlchemy Instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/sqlalchemy/sqlalchemy.html) library, specifically `SQLAlchemyInstrumentor().instrument()`, to which it passes `**kwargs`. Args: engine: The `sqlalchemy` engine to instrument, or `None` to instrument all engines. enable_commenter: Adds comments to SQL queries performed by SQLAlchemy, so that database logs have additional context. commenter_options: Configure the tags to be added to the SQL comments. **kwargs: Additional keyword arguments to pass to the OpenTelemetry `instrument` methods. """from.integrations.sqlalchemyimportinstrument_sqlalchemyself._warn_if_not_initialized_for_instrumentation()returninstrument_sqlalchemy(engine=engine,enable_commenter=enable_commenter,commenter_options=commenter_optionsor{},**{'tracer_provider':self._config.get_tracer_provider(),'meter_provider':self._config.get_meter_provider(),**kwargs,},)
definstrument_sqlite3(self,conn:SQLite3Connection=None,**kwargs:Any)->SQLite3Connection:"""Instrument the `sqlite3` module or a specific connection so that spans are automatically created for each operation. Uses the [OpenTelemetry SQLite3 Instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/sqlite3/sqlite3.html) library. Args: conn: The `sqlite3` connection to instrument, or `None` to instrument all connections. **kwargs: Additional keyword arguments to pass to the OpenTelemetry `instrument` methods. Returns: If a connection is provided, returns the instrumented connection. If no connection is provided, returns `None`. """from.integrations.sqlite3importinstrument_sqlite3self._warn_if_not_initialized_for_instrumentation()returninstrument_sqlite3(conn=conn,**{'tracer_provider':self._config.get_tracer_provider(),**kwargs})
definstrument_aws_lambda(self,lambda_handler:LambdaHandler,event_context_extractor:Callable[[LambdaEvent],Context]|None=None,**kwargs:Any,)->None:"""Instrument AWS Lambda so that spans are automatically created for each invocation. Uses the [OpenTelemetry AWS Lambda Instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/aws_lambda/aws_lambda.html) library, specifically `AwsLambdaInstrumentor().instrument()`, to which it passes `**kwargs`. Args: lambda_handler: The lambda handler function to instrument. event_context_extractor: A function that returns an OTel Trace Context given the Lambda Event the AWS. **kwargs: Additional keyword arguments to pass to the OpenTelemetry `instrument` methods for future compatibility. """from.integrations.aws_lambdaimportinstrument_aws_lambdaself._warn_if_not_initialized_for_instrumentation()returninstrument_aws_lambda(lambda_handler=lambda_handler,event_context_extractor=event_context_extractor,**{# type: ignore'tracer_provider':self._config.get_tracer_provider(),'meter_provider':self._config.get_meter_provider(),**kwargs,},)
definstrument_mysql(self,conn:MySQLConnection=None,**kwargs:Any)->MySQLConnection:"""Instrument the `mysql` module or a specific MySQL connection so that spans are automatically created for each operation. Uses the [OpenTelemetry MySQL Instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/mysql/mysql.html) library. Args: conn: The `mysql` connection to instrument, or `None` to instrument all connections. **kwargs: Additional keyword arguments to pass to the OpenTelemetry `instrument` methods. Returns: If a connection is provided, returns the instrumented connection. If no connection is provided, returns None. """from.integrations.mysqlimportinstrument_mysqlself._warn_if_not_initialized_for_instrumentation()returninstrument_mysql(conn=conn,**{# type: ignore'tracer_provider':self._config.get_tracer_provider(),'meter_provider':self._config.get_meter_provider(),**kwargs,},)
definstrument_system_metrics(self,config:SystemMetricsConfig|None=None,base:SystemMetricsBase='basic')->None:"""Collect system metrics. See [the guide](https://logfire.pydantic.dev/docs/integrations/system-metrics/) for more information. Args: config: A dictionary where the keys are metric names and the values are optional further configuration for that metric. base: A string indicating the base config dictionary which `config` will be merged with, or `None` for an empty base config. """from.integrations.system_metricsimportinstrument_system_metricsself._warn_if_not_initialized_for_instrumentation()returninstrument_system_metrics(self,config,base)
A counter is a cumulative metric that represents a single numerical value that only ever goes up.
import logfire

logfire.configure()
counter = logfire.metric_counter('exceptions', unit='1', description='Number of exceptions caught')

try:
    raise Exception('oops')
except Exception:
    counter.add(1)
defmetric_counter(self,name:str,*,unit:str='',description:str='')->Counter:"""Create a counter metric. A counter is a cumulative metric that represents a single numerical value that only ever goes up. ```py import logfire logfire.configure() counter = logfire.metric_counter('exceptions', unit='1', description='Number of exceptions caught') try: raise Exception('oops') except Exception: counter.add(1) ``` See the [Opentelemetry documentation](https://opentelemetry.io/docs/specs/otel/metrics/api/#counter) about counters. Args: name: The name of the metric. unit: The unit of the metric. description: The description of the metric. Returns: The counter metric. """returnself._meter.create_counter(name,unit,description)
defmetric_histogram(self,name:str,*,unit:str='',description:str='')->Histogram:"""Create a histogram metric. A histogram is a metric that samples observations (usually things like request durations or response sizes). ```py import logfire logfire.configure() histogram = logfire.metric_histogram('bank.amount_transferred', unit='$', description='Amount transferred') def transfer(amount: int): histogram.record(amount) ``` See the [Opentelemetry documentation](https://opentelemetry.io/docs/specs/otel/metrics/api/#histogram) about Args: name: The name of the metric. unit: The unit of the metric. description: The description of the metric. Returns: The histogram metric. """returnself._meter.create_histogram(name,unit,description)
defmetric_gauge(self,name:str,*,unit:str='',description:str='')->Gauge:"""Create a gauge metric. Gauge is a synchronous instrument which can be used to record non-additive measurements. ```py import logfire logfire.configure() gauge = logfire.metric_gauge('system.cpu_usage', unit='%', description='CPU usage') def update_cpu_usage(cpu_percent): gauge.set(cpu_percent) ``` See the [Opentelemetry documentation](https://opentelemetry.io/docs/specs/otel/metrics/api/#gauge) about gauges. Args: name: The name of the metric. unit: The unit of the metric. description: The description of the metric. Returns: The gauge metric. """returnself._meter.create_gauge(name,unit,description)
defmetric_up_down_counter(self,name:str,*,unit:str='',description:str='')->UpDownCounter:"""Create an up-down counter metric. An up-down counter is a cumulative metric that represents a single numerical value that can be adjusted up or down. ```py import logfire logfire.configure() up_down_counter = logfire.metric_up_down_counter('users.logged_in', unit='1', description='Users logged in') def on_login(user): up_down_counter.add(1) def on_logout(user): up_down_counter.add(-1) ``` See the [Opentelemetry documentation](https://opentelemetry.io/docs/specs/otel/metrics/api/#updowncounter) about up-down counters. Args: name: The name of the metric. unit: The unit of the metric. description: The description of the metric. Returns: The up-down counter metric. """returnself._meter.create_up_down_counter(name,unit,description)
defmetric_counter_callback(self,name:str,*,callbacks:Sequence[CallbackT],unit:str='',description:str='',)->None:"""Create a counter metric that uses a callback to collect observations. The counter metric is a cumulative metric that represents a single numerical value that only ever goes up. ```py import logfire import psutil from opentelemetry.metrics import CallbackOptions, Observation logfire.configure() def cpu_usage_callback(options: CallbackOptions): cpu_percents = psutil.cpu_percent(percpu=True) for i, cpu_percent in enumerate(cpu_percents): yield Observation(cpu_percent, {'cpu': i}) cpu_usage_counter = logfire.metric_counter_callback( 'system.cpu.usage', callbacks=[cpu_usage_callback], unit='%', description='CPU usage', ) ``` See the [Opentelemetry documentation](https://opentelemetry.io/docs/specs/otel/metrics/api/#asynchronous-counter) about asynchronous counter. Args: name: The name of the metric. callbacks: A sequence of callbacks that return an iterable of [Observation](https://opentelemetry-python.readthedocs.io/en/latest/api/metrics.html#opentelemetry.metrics.Observation). unit: The unit of the metric. description: The description of the metric. """self._meter.create_observable_counter(name,callbacks,unit,description)
Create a gauge metric that uses a callback to collect observations.
The gauge metric is a metric that represents a single numerical value that can arbitrarily go up and down.
import threading

import logfire
from opentelemetry.metrics import CallbackOptions, Observation

logfire.configure()


def thread_count_callback(options: CallbackOptions):
    yield Observation(threading.active_count())


logfire.metric_gauge_callback(
    'system.thread_count',
    callbacks=[thread_count_callback],
    unit='1',
    description='Number of threads',
)
defmetric_gauge_callback(self,name:str,callbacks:Sequence[CallbackT],*,unit:str='',description:str='')->None:"""Create a gauge metric that uses a callback to collect observations. The gauge metric is a metric that represents a single numerical value that can arbitrarily go up and down. ```py import threading import logfire from opentelemetry.metrics import CallbackOptions, Observation logfire.configure() def thread_count_callback(options: CallbackOptions): yield Observation(threading.active_count()) logfire.metric_gauge_callback( 'system.thread_count', callbacks=[thread_count_callback], unit='1', description='Number of threads', ) ``` See the [Opentelemetry documentation](https://opentelemetry.io/docs/specs/otel/metrics/api/#asynchronous-gauge) about asynchronous gauge. Args: name: The name of the metric. callbacks: A sequence of callbacks that return an iterable of [Observation](https://opentelemetry-python.readthedocs.io/en/latest/api/metrics.html#opentelemetry.metrics.Observation). unit: The unit of the metric. description: The description of the metric. """self._meter.create_observable_gauge(name,callbacks,unit,description)
Create an up-down counter metric that uses a callback to collect observations.
The up-down counter is a cumulative metric that represents a single numerical value that can be adjusted up or
down.
import logfire
from opentelemetry.metrics import CallbackOptions, Observation

logfire.configure()

items = []


def inventory_callback(options: CallbackOptions):
    yield Observation(len(items))


logfire.metric_up_down_counter_callback(
    name='store.inventory',
    description='Number of items in the inventory',
    callbacks=[inventory_callback],
)
defmetric_up_down_counter_callback(self,name:str,callbacks:Sequence[CallbackT],*,unit:str='',description:str='')->None:"""Create an up-down counter metric that uses a callback to collect observations. The up-down counter is a cumulative metric that represents a single numerical value that can be adjusted up or down. ```py import logfire from opentelemetry.metrics import CallbackOptions, Observation logfire.configure() items = [] def inventory_callback(options: CallbackOptions): yield Observation(len(items)) logfire.metric_up_down_counter_callback( name='store.inventory', description='Number of items in the inventory', callbacks=[inventory_callback], ) ``` See the [Opentelemetry documentation](https://opentelemetry.io/docs/specs/otel/metrics/api/#asynchronous-updowncounter) about asynchronous up-down counters. Args: name: The name of the metric. callbacks: A sequence of callbacks that return an iterable of [Observation](https://opentelemetry-python.readthedocs.io/en/latest/api/metrics.html#opentelemetry.metrics.Observation). unit: The unit of the metric. description: The description of the metric. """self._meter.create_observable_up_down_counter(name,callbacks,unit,description)
Prevent spans and metrics from being created for the given OpenTelemetry scope names.
To get the scope name of a span/metric,
check the value of the otel_scope_name column in the Logfire database.
Source code in logfire/_internal/main.py
Lines 2050–2056
defsuppress_scopes(self,*scopes:str)->None:"""Prevent spans and metrics from being created for the given OpenTelemetry scope names. To get the scope name of a span/metric, check the value of the `otel_scope_name` column in the Logfire database. """self._config.suppress_scopes(*scopes)
defshutdown(self,timeout_millis:int=30_000,flush:bool=True)->bool:# pragma: no cover"""Shut down all tracers and meters. This will clean up any resources used by the tracers and meters and flush any remaining spans and metrics. Args: timeout_millis: The timeout in milliseconds. flush: Whether to flush remaining spans and metrics before shutting down. Returns: `False` if the timeout was reached before the shutdown was completed, `True` otherwise. """start=time()ifflush:# pragma: no branchself._tracer_provider.force_flush(timeout_millis)remaining=max(0,timeout_millis-(time()-start))ifnotremaining:# pragma: no coverreturnFalseself._tracer_provider.shutdown()remaining=max(0,timeout_millis-(time()-start))ifnotremaining:# pragma: no coverreturnFalseifflush:# pragma: no branchself._meter_provider.force_flush(remaining)remaining=max(0,timeout_millis-(time()-start))ifnotremaining:# pragma: no coverreturnFalseself._meter_provider.shutdown(remaining)return(start-time())<timeout_millis
Logfire is the observability tool focused on developer experience.
An optional tail sampling callback which will be called for every span.
It should return a number between 0.0 and 1.0, the probability that the entire trace will be included.
Use SamplingOptions.level_or_duration
for a common use case.
Every span in a trace will be stored in memory until either the trace is included by tail sampling
or it's completed and discarded, so large traces may consume a lot of memory.
Returns a SamplingOptions instance that tail samples traces based on their log level and duration.
If a trace has at least one span/log that has a log level greater than or equal to level_threshold,
or if the duration of the whole trace is greater than duration_threshold seconds,
then the whole trace will be included.
Otherwise, the probability is background_rate.
The head parameter is the same as in the SamplingOptions constructor.
@classmethoddeflevel_or_duration(cls,*,head:float|Sampler=1.0,level_threshold:LevelName|None='notice',duration_threshold:float|None=5.0,background_rate:float=0.0,)->Self:"""Returns a `SamplingOptions` instance that tail samples traces based on their log level and duration. If a trace has at least one span/log that has a log level greater than or equal to `level_threshold`, or if the duration of the whole trace is greater than `duration_threshold` seconds, then the whole trace will be included. Otherwise, the probability is `background_rate`. The `head` parameter is the same as in the `SamplingOptions` constructor. """head_sample_rate=headifisinstance(head,(float,int))else1.0ifnot(0.0<=background_rate<=head_sample_rate<=1.0):raiseValueError('Invalid sampling rates, must be 0.0 <= background_rate <= head <= 1.0')defget_tail_sample_rate(span_info:TailSamplingSpanInfo)->float:ifduration_thresholdisnotNoneandspan_info.duration>duration_threshold:return1.0iflevel_thresholdisnotNoneandspan_info.level>=level_threshold:return1.0returnbackground_ratereturncls(head=head,tail=get_tail_sample_rate)
Information about a module being imported that should maybe be traced automatically.
This object will be passed to a function that should return True if the module should be traced.
In particular it'll be passed to a function that's passed to install_auto_tracing as the modules argument.
Return True if the module name starts with any of the given prefixes, using dots as boundaries.
For example, if the module name is foo.bar.spam, then parts_start_with('foo') will return True,
but parts_start_with('bar') or parts_start_with('foo_bar') will return False.
In other words, this will match the module itself or any submodules.
If a prefix contains any characters other than letters, numbers, and dots,
then it will be treated as a regular expression.
Source code in logfire/_internal/auto_trace/types.py
Lines 24–37
defparts_start_with(self,prefix:str|Sequence[str])->bool:"""Return True if the module name starts with any of the given prefixes, using dots as boundaries. For example, if the module name is `foo.bar.spam`, then `parts_start_with('foo')` will return True, but `parts_start_with('bar')` or `parts_start_with('foo_bar')` will return False. In other words, this will match the module itself or any submodules. If a prefix contains any characters other than letters, numbers, and dots, then it will be treated as a regular expression. """ifisinstance(prefix,str):prefix=(prefix,)pattern='|'.join([get_module_pattern(p)forpinprefix])returnbool(re.match(pattern,self.name))
The root path for the source code in the repository.
If you run the code from the directory corresponding to the root of the repository, you can leave this blank.
Example
Suppose that your repository contains a/b/c/main.py, the folder a/b/ is copied
into the /docker/root/ folder of your docker container, and within the container
the command python ./b/c/main.py is run from within the /docker/root/a/ directory.
Then code.filepath will be b/c/main.py for spans created in that file, and the
root_path should be set to a so that the final link is a/b/c/main.py.
@handle_internal_errors()
def set_attribute(self, key: str, value: Any) -> None:
    """Set a single attribute on the span.

    Args:
        key: The attribute's key.
        value: The attribute's value.
    """
    self._added_attributes = True
    # Record a JSON schema for the raw value alongside the attribute itself.
    self._json_schema_properties[key] = create_json_schema(value, set())
    # set_user_attribute may rewrite the key and converts the value to an OTel-compatible form.
    key, converted = set_user_attribute(self._otlp_attributes, key, value)
    if self._span is not None:  # pragma: no branch
        self._span.set_attribute(key, converted)
def set_attributes(self, attributes: dict[str, Any]) -> None:
    """Set each key/value pair in `attributes` on the span."""
    for k, v in attributes.items():
        self.set_attribute(k, v)
def record_exception(
    self,
    exception: BaseException,
    attributes: otel_types.Attributes = None,
    timestamp: int | None = None,
    escaped: bool = False,
) -> None:  # pragma: no cover
    """Record an exception as a span event.

    Delegates to the OpenTelemetry SDK `Span.record_exception` method.
    """
    span = self._span
    if span is None:
        raise RuntimeError('Span has not been started')
    # Sampled-out spans are skipped up front, since _record_exception is somewhat expensive.
    if not span.is_recording():
        return
    record_exception(span, exception, attributes=attributes, timestamp=timestamp, escaped=escaped)
@handle_internal_errors()
def set_level(self, level: LevelName | int):
    """Set the log level of this span."""
    level_attrs = log_level_attributes(level)
    if self._span is not None:
        self._span.set_attributes(level_attrs)
    else:
        # No started span yet: record the level on the pending OTLP attributes instead.
        self._otlp_attributes.update(level_attrs)
A function that is called for each match found by the scrubber.
If it returns None, the value is redacted.
Otherwise, the returned value replaces the matched value.
The function accepts a single argument of type logfire.ScrubMatch.
A sequence of regular expressions to detect sensitive data that should be redacted.
For example, the default includes 'password', 'secret', and 'api[._ -]?key'.
The specified patterns are combined with the default patterns.
def emit(self, record: LogRecord) -> None:
    """Send the log to Logfire.

    Args:
        record: The log record to send.
    """
    # While instrumentation is suppressed, hand the record to the fallback handler instead.
    if is_instrumentation_suppressed():
        self.fallback.handle(record)
        return
    attrs = self.fill_attributes(record)
    template = attrs.pop(ATTRIBUTES_MESSAGE_TEMPLATE_KEY, record.msg)
    # Map the stdlib level number to its OTel equivalent, falling back to the raw number.
    level = LOGGING_TO_OTEL_LEVEL_NUMBERS.get(record.levelno, record.levelno)
    self.logfire_instance.log(
        msg_template=template,
        level=level,
        attributes=attrs,
        exc_info=record.exc_info,
    )
def fill_attributes(self, record: LogRecord) -> dict[str, Any]:
    """Build the attributes to send to Logfire.

    This method can be overridden to add more attributes.

    Args:
        record: The log record.

    Returns:
        The attributes for the log record.
    """
    # Start from the record's extra fields, dropping the standard LogRecord attributes.
    attrs: dict[str, Any] = {key: val for key, val in record.__dict__.items() if key not in RESERVED_ATTRS}
    attrs.update({
        'code.filepath': record.pathname,
        'code.lineno': record.lineno,
        'code.function': record.funcName,
        ATTRIBUTES_LOGGING_NAME: record.name,
    })
    message, format_args = _format_message(record)
    attrs[ATTRIBUTES_MESSAGE_KEY] = message
    attrs.update(format_args)
    return attrs
A middleware that processes a structlog event and sends it to Logfire.
Source code in logfire/integrations/structlog.py
Lines 37–50
def __call__(self, logger: WrappedLogger, name: str, event_dict: EventDict) -> EventDict:
    """A middleware to process structlog event, and send it to **Logfire**."""
    attrs = {key: value for key, value in event_dict.items() if key not in RESERVED_ATTRS}
    # NOTE: An event can be `None` in structlog. We may want to create a default msg in those cases.
    message = event_dict.get('event') or 'structlog event'
    attrs[ATTRIBUTES_MESSAGE_KEY] = message
    self.logfire_instance.log(
        level=event_dict.get('level', 'info').lower(),  # type: ignore
        msg_template=message,
        attributes=attrs,
        console_log=self.console_log,
        exc_info=event_dict.get('exc_info', False),
    )
    return event_dict
Decorator to prevent a function/class from being traced by logfire.install_auto_tracing.
This is useful for small functions that are called very frequently and would generate too much noise.
The decorator is detected at import time.
Only @no_auto_trace or @logfire.no_auto_trace are supported.
Renaming/aliasing either the function or module won't work.
Neither will calling this indirectly via another function.
Any decorated function, or any function defined anywhere inside a decorated function/class,
will be completely ignored by logfire.install_auto_tracing.
This decorator simply returns the argument unchanged, so there is zero runtime overhead.
Source code in logfire/_internal/auto_trace/rewrite_ast.py
Lines 157–172
def no_auto_trace(x: T) -> T:
    """Decorator that excludes a function/class from `logfire.install_auto_tracing`.

    Useful for small functions that are called very frequently and would generate too much noise.

    The decorator is detected at import time.
    Only `@no_auto_trace` or `@logfire.no_auto_trace` are supported.
    Renaming/aliasing either the function or module won't work.
    Neither will calling this indirectly via another function.

    Any decorated function, or any function defined anywhere inside a decorated function/class,
    will be completely ignored by `logfire.install_auto_tracing`.

    This decorator simply returns the argument unchanged, so there is zero runtime overhead.
    """
    return x  # pragma: no cover
If True, configures and returns a Logfire instance that is not the default global instance.
Use this to create multiple separate configurations, e.g. to send to different projects.
Defaults to the LOGFIRE_SEND_TO_LOGFIRE environment variable if set, otherwise defaults to True.
If if-token-present is provided, logs will only be sent if a token is present.
The environment this service is running in, e.g. 'staging' or 'prod'. Sets the
deployment.environment.name
resource attribute. Useful for filtering within projects in the Logfire UI.
Defaults to the LOGFIRE_ENVIRONMENT environment variable.
Whether to control terminal output. If None uses the LOGFIRE_CONSOLE_* environment variables,
otherwise defaults to ConsoleOptions(colors='auto', indent_spans=True, include_timestamps=True, verbose=False).
If False disables console output. It can also be disabled by setting LOGFIRE_CONSOLE environment variable to false.
Directory that contains the pyproject.toml file for this project. If None uses the
LOGFIRE_CONFIG_DIR environment variable, otherwise defaults to the current working directory.
By default, incoming trace context is extracted, but generates a warning.
Set to True to disable the warning.
Set to False to suppress extraction of incoming trace context.
See Unintentional Distributed Tracing
for more information.
This setting always applies globally, and the last value set is used, including the default value.
def configure(  # noqa: D417
    *,
    local: bool = False,
    send_to_logfire: bool | Literal['if-token-present'] | None = None,
    token: str | None = None,
    service_name: str | None = None,
    service_version: str | None = None,
    environment: str | None = None,
    console: ConsoleOptions | Literal[False] | None = None,
    config_dir: Path | str | None = None,
    data_dir: Path | str | None = None,
    additional_span_processors: Sequence[SpanProcessor] | None = None,
    metrics: MetricsOptions | Literal[False] | None = None,
    scrubbing: ScrubbingOptions | Literal[False] | None = None,
    inspect_arguments: bool | None = None,
    sampling: SamplingOptions | None = None,
    code_source: CodeSource | None = None,
    distributed_tracing: bool | None = None,
    advanced: AdvancedOptions | None = None,
    **deprecated_kwargs: Unpack[DeprecatedKwargs],
) -> Logfire:
    """Configure the logfire SDK.

    Args:
        local: If `True`, configures and returns a `Logfire` instance that is not the default global instance.
            Use this to create multiple separate configurations, e.g. to send to different projects.
        send_to_logfire: Whether to send logs to logfire.dev.
            Defaults to the `LOGFIRE_SEND_TO_LOGFIRE` environment variable if set, otherwise defaults to `True`.
            If `if-token-present` is provided, logs will only be sent if a token is present.
        token: The project token.
            Defaults to the `LOGFIRE_TOKEN` environment variable.
        service_name: Name of this service.
            Defaults to the `LOGFIRE_SERVICE_NAME` environment variable.
        service_version: Version of this service.
            Defaults to the `LOGFIRE_SERVICE_VERSION` environment variable, or the current git commit hash if available.
        environment: The environment this service is running in, e.g. `'staging'` or `'prod'`. Sets the
            [`deployment.environment.name`](https://opentelemetry.io/docs/specs/semconv/resource/deployment-environment/)
            resource attribute. Useful for filtering within projects in the Logfire UI.
            Defaults to the `LOGFIRE_ENVIRONMENT` environment variable.
        console: Whether to control terminal output.
            If `None` uses the `LOGFIRE_CONSOLE_*` environment variables,
            otherwise defaults to `ConsoleOptions(colors='auto', indent_spans=True, include_timestamps=True, verbose=False)`.
            If `False` disables console output.
            It can also be disabled by setting `LOGFIRE_CONSOLE` environment variable to `false`.
        config_dir: Directory that contains the `pyproject.toml` file for this project.
            If `None` uses the `LOGFIRE_CONFIG_DIR` environment variable, otherwise defaults to the current working directory.
        data_dir: Directory to store credentials, and logs.
            If `None` uses the `LOGFIRE_CREDENTIALS_DIR` environment variable, otherwise defaults to `'.logfire'`.
        additional_span_processors: Span processors to use in addition to the default processor which exports spans to Logfire's API.
        metrics: Set to `False` to disable sending all metrics,
            or provide a `MetricsOptions` object to configure metrics, e.g. additional metric readers.
        scrubbing: Options for scrubbing sensitive data. Set to `False` to disable.
        inspect_arguments: Whether to enable
            [f-string magic](https://logfire.pydantic.dev/docs/guides/onboarding-checklist/add-manual-tracing/#f-strings).
            If `None` uses the `LOGFIRE_INSPECT_ARGUMENTS` environment variable.
            Defaults to `True` if and only if the Python version is at least 3.11.
        sampling: Sampling options. See the [sampling guide](https://logfire.pydantic.dev/docs/guides/advanced/sampling/).
        code_source: Settings for the source code of the project.
        distributed_tracing: By default, incoming trace context is extracted, but generates a warning.
            Set to `True` to disable the warning.
            Set to `False` to suppress extraction of incoming trace context.
            See [Unintentional Distributed Tracing](https://logfire.pydantic.dev/docs/how-to-guides/distributed-tracing/#unintentional-distributed-tracing)
            for more information.
            This setting always applies globally, and the last value set is used, including the default value.
        advanced: Advanced options primarily used for testing by Logfire developers.
    """
    # Imported here to avoid a circular import at module load time.
    from .. import DEFAULT_LOGFIRE_INSTANCE, Logfire

    # --- Deprecated keyword handling: pop each legacy kwarg, then raise or warn. ---

    # `processors` was removed in favor of `additional_span_processors`.
    processors = deprecated_kwargs.pop('processors', None)  # type: ignore
    if processors is not None:  # pragma: no cover
        raise ValueError(
            'The `processors` argument has been replaced by `additional_span_processors`. '
            'Set `send_to_logfire=False` to disable the default processor.'
        )

    # `metric_readers` was removed in favor of `metrics=MetricsOptions(...)`.
    metric_readers = deprecated_kwargs.pop('metric_readers', None)  # type: ignore
    if metric_readers is not None:  # pragma: no cover
        raise ValueError(
            'The `metric_readers` argument has been replaced by '
            '`metrics=logfire.MetricsOptions(additional_readers=[...])`. '
            'Set `send_to_logfire=False` to disable the default metric reader.'
        )

    # `collect_system_metrics` was removed entirely; the message differs by the value passed.
    collect_system_metrics = deprecated_kwargs.pop('collect_system_metrics', None)  # type: ignore
    if collect_system_metrics is False:
        raise ValueError(
            'The `collect_system_metrics` argument has been removed. '
            'System metrics are no longer collected by default.'
        )
    if collect_system_metrics is not None:
        raise ValueError(
            'The `collect_system_metrics` argument has been removed. '
            'Use `logfire.instrument_system_metrics()` instead.'
        )

    # `scrubbing_callback`/`scrubbing_patterns` are folded into a ScrubbingOptions, unless
    # `scrubbing` was also given, which is an error.
    scrubbing_callback = deprecated_kwargs.pop('scrubbing_callback', None)  # type: ignore
    scrubbing_patterns = deprecated_kwargs.pop('scrubbing_patterns', None)  # type: ignore
    if scrubbing_callback or scrubbing_patterns:
        if scrubbing is not None:
            raise ValueError(
                'Cannot specify `scrubbing` and `scrubbing_callback` or `scrubbing_patterns` at the same time. '
                'Use only `scrubbing`.'
            )
        warnings.warn(
            'The `scrubbing_callback` and `scrubbing_patterns` arguments are deprecated. '
            'Use `scrubbing=logfire.ScrubbingOptions(callback=..., extra_patterns=[...])` instead.',
        )
        scrubbing = ScrubbingOptions(callback=scrubbing_callback, extra_patterns=scrubbing_patterns)  # type: ignore

    # `project_name` is simply ignored (with a warning).
    project_name = deprecated_kwargs.pop('project_name', None)  # type: ignore
    if project_name is not None:
        warnings.warn(
            'The `project_name` argument is deprecated and not needed.',
        )

    # `trace_sample_rate` maps onto `sampling.head`; both together is an error.
    trace_sample_rate: float | None = deprecated_kwargs.pop('trace_sample_rate', None)  # type: ignore
    if trace_sample_rate is not None:
        if sampling:
            raise ValueError(
                'Cannot specify both `trace_sample_rate` and `sampling`. '
                'Use `sampling.head` instead of `trace_sample_rate`.'
            )
        else:
            sampling = SamplingOptions(head=trace_sample_rate)
            warnings.warn(
                'The `trace_sample_rate` argument is deprecated. '
                'Use `sampling=logfire.SamplingOptions(head=...)` instead.',
            )

    # `show_summary` is ignored (with a warning).
    show_summary = deprecated_kwargs.pop('show_summary', None)  # type: ignore
    if show_summary is not None:  # pragma: no cover
        warnings.warn(
            'The `show_summary` argument is deprecated. '
            'Use `console=False` or `console=logfire.ConsoleOptions(show_project_link=False)` instead.',
        )

    # The advanced-option kwargs each map onto an AdvancedOptions field.
    for key in ('base_url', 'id_generator', 'ns_timestamp_generator'):
        value: Any = deprecated_kwargs.pop(key, None)  # type: ignore
        if value is None:
            continue
        if advanced is not None:
            raise ValueError(f'Cannot specify `{key}` and `advanced`. Use only `advanced`.')
        # (this means that specifying two deprecated advanced kwargs at the same time will raise an error)
        advanced = AdvancedOptions(**{key: value})
        warnings.warn(
            f'The `{key}` argument is deprecated. Use `advanced=logfire.AdvancedOptions({key}=...)` instead.',
            stacklevel=2,
        )

    # `additional_metric_readers` maps onto `metrics.additional_readers`; both together is an error.
    additional_metric_readers: Any = deprecated_kwargs.pop('additional_metric_readers', None)  # type: ignore
    if additional_metric_readers:
        if metrics is not None:
            raise ValueError(
                'Cannot specify both `additional_metric_readers` and `metrics`. '
                'Use `metrics=logfire.MetricsOptions(additional_readers=[...])` instead.'
            )
        warnings.warn(
            'The `additional_metric_readers` argument is deprecated. '
            'Use `metrics=logfire.MetricsOptions(additional_readers=[...])` instead.',
        )
        metrics = MetricsOptions(additional_readers=additional_metric_readers)

    # `pydantic_plugin` is forwarded to the pydantic integration (with a warning).
    pydantic_plugin: Any = deprecated_kwargs.pop('pydantic_plugin', None)  # type: ignore
    if pydantic_plugin is not None:
        warnings.warn(
            'The `pydantic_plugin` argument is deprecated. Use `logfire.instrument_pydantic()` instead.',
        )
        from logfire.integrations.pydantic import set_pydantic_plugin_config

        set_pydantic_plugin_config(pydantic_plugin)

    # Anything left in deprecated_kwargs was never a valid argument.
    if deprecated_kwargs:
        raise TypeError(f'configure() got unexpected keyword arguments: {", ".join(deprecated_kwargs)}')

    # --- Actual configuration: a fresh config for `local`, otherwise the global singleton. ---
    if local:
        config = LogfireConfig()
    else:
        config = GLOBAL_CONFIG
    config.configure(
        send_to_logfire=send_to_logfire,
        token=token,
        service_name=service_name,
        service_version=service_version,
        environment=environment,
        console=console,
        metrics=metrics,
        config_dir=Path(config_dir) if config_dir else None,
        data_dir=Path(data_dir) if data_dir else None,
        additional_span_processors=additional_span_processors,
        scrubbing=scrubbing,
        inspect_arguments=inspect_arguments,
        sampling=sampling,
        code_source=code_source,
        distributed_tracing=distributed_tracing,
        advanced=advanced,
    )

    if local:
        return Logfire(config=config)
    else:
        return DEFAULT_LOGFIRE_INSTANCE
Context manager to suppress all logs/spans generated by logfire or OpenTelemetry.
Source code in logfire/_internal/utils.py
Lines 242–252
@contextmanager
def suppress_instrumentation():
    """Context manager to suppress all logs/spans generated by logfire or OpenTelemetry."""
    ctx = context.get_current()
    # Set every suppression key on a copy of the current context.
    for suppress_key in SUPPRESS_INSTRUMENTATION_CONTEXT_KEYS:
        ctx = context.set_value(suppress_key, True, ctx)
    attach_token = context.attach(ctx)
    try:
        yield
    finally:
        context.detach(attach_token)
A dictionary with the handler and format for Loguru.
Source code in logfire/__init__.py
Lines 75–83
def loguru_handler() -> Any:
    """Create a **Logfire** handler for Loguru.

    Returns:
        A dictionary with the handler and format for Loguru.
    """
    # Imported lazily so the loguru integration is only loaded when actually used.
    from .integrations import loguru

    sink = loguru.LogfireHandler()
    return {'sink': sink, 'format': '{message}'}