Kirill Zhukov
97db2097d3
added api_timeout and scrape_interval to scrape config; added requirements.txt
32 lines · 1.1 KiB · Python
from datetime import datetime
from pathlib import Path

from pydantic import HttpUrl, Field, BaseModel  # pylint: disable=no-name-in-module

from .db_config import DbConfig

# DEFAULTS
ALBS_URL_DEFAULT = 'https://build.almalinux.org'
LOG_FILE_DEFAULT = '/tmp/extractor.log'
API_DEFAULT = 30  # API response timeout, in seconds
SCRAPE_INTERVAL_DEFAULT = 3600  # scrape interval, in seconds


class ExtractorConfig(BaseModel):
    """
    Config model for the Extractor service.
    """
    log_file: Path = Field(description='logfile path',
                           default=LOG_FILE_DEFAULT)
    albs_url: HttpUrl = Field(description='ALBS root URL',
                              default=ALBS_URL_DEFAULT)
    oldest_build_age: datetime = \
        Field(description='oldest build age to extract and store')
    jwt: str = Field(description='ALBS JWT token')
    db_config: DbConfig = Field(description='database configuration')
    api_timeout: int = Field(
        description='max time in seconds to wait for API response',
        default=API_DEFAULT)
    scrape_interval: int = Field(
        description='how often (in seconds) we will extract data from ALBS',
        default=SCRAPE_INTERVAL_DEFAULT)
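For context, a minimal usage sketch, not part of the file above: pydantic validates and coerces every field when the model is constructed, so a missing required field (oldest_build_age, jwt, db_config) or a value that fails coercion (e.g. a malformed albs_url for HttpUrl) raises ValidationError, while the optional fields fall back to the module-level defaults. The JWT string and the DbConfig arguments below are placeholders; DbConfig's real fields live in .db_config and are not shown here.

# Hypothetical usage sketch; jwt value and DbConfig() arguments are placeholders.
from datetime import datetime, timedelta

from pydantic import ValidationError

try:
    config = ExtractorConfig(
        jwt='<albs-jwt-token>',
        oldest_build_age=datetime.now() - timedelta(days=90),
        db_config=DbConfig(),  # placeholder; real required fields are in .db_config
    )
    # Fields not passed in fall back to the defaults defined above:
    print(config.albs_url)         # https://build.almalinux.org
    print(config.api_timeout)      # 30
    print(config.scrape_interval)  # 3600
except ValidationError as err:
    # Raised when a required field is missing or a value fails validation.
    print(err)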