# __init__.py
from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
SSLContext,
HAS_SNI,
IS_PYOPENSSL,
IS_SECURETRANSPORT,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
from .timeout import (
current_time,
Timeout,
)
from .retry import Retry
from .url import (
get_host,
parse_url,
split_first,
Url,
)
from .wait import (
wait_for_read,
wait_for_write
)
__all__ = (
'HAS_SNI',
'IS_PYOPENSSL',
'IS_SECURETRANSPORT',
'SSLContext',
'Retry',
'Timeout',
'Url',
'assert_fingerprint',
'current_time',
'is_connection_dropped',
'is_fp_closed',
'get_host',
'parse_url',
'make_headers',
'resolve_cert_reqs',
'resolve_ssl_version',
'split_first',
'ssl_wrap_socket',
'wait_for_read',
'wait_for_write'
)
# execute.py
# -*- coding: utf-8 -*-
import logging
import os
import sys
import subprocess
import tempfile
from pathlib import Path
from typing import Any, Tuple, Union, List
from subprocess import list2cmdline
from primordial.sizes import ByteSize
from customer_local_ops.util import fmap
DEFAULT_MAX_SEGMENT_SIZE = ByteSize(KiBytes=32)
LOG = logging.getLogger(__name__)
MAX_COMMAND_RESPONSE_BUFFER_SIZE = 2 * DEFAULT_MAX_SEGMENT_SIZE
# Type aliases
RunCommandResult = Tuple[int, str, str]
# pylint: disable=import-error
if sys.platform == 'win32':
import win32con
from win32com.shell import shellcon
from win32com.shell import shell
HANDLE = int
HWND = int
HKEY = int
DWORD = int
def shell_start(file_name: str,
params: str = None,
verb: str = "open",
show: int = win32con.SW_SHOWNORMAL,
mask: int = shellcon.SEE_MASK_FLAG_DDEWAIT,
dir_name: str = None,
id_list: Any = None,
class_id: str = None,
class_key: HKEY = None,
hot_key: DWORD = None,
monitor: HANDLE = None,
window: HWND = None) -> bool:
"""
Wrapper for the Win32 API call ShellExecuteEx. This function can be used on Windows platforms any time Nydus
needs to restart itself for any reason because ShellExecuteEx allows the child process to survive after its
parent process stops. Initially added to resolve a DLL conflict issue, this could also be useful for
upgrading Nydus, for example.
Further information for ShellExecuteEx can be found at
https://docs.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-shellexecuteexa
:param file_name: The name of the executable to be started. ShellExecuteEx also handles file associations,
so this could also be the name of a document, for example, to be opened with the
associated application, or a URL to be opened by the user's default browser.
:param params: Parameters to be passed to the application. Should not be specified if file_name
is NOT an executable file.
:param verb: Action to take. May be one of 'edit', 'explore', 'find', 'open', 'print', 'properties' or
'runas'. Defaults to 'open'
:param show: Specify how an application is to be shown
:param mask: Flags that indicate content and validity of other parameters
:param dir_name: Name of the working directory. If None, the current directory is used
:param id_list: See Microsoft documentation at the URL above
:param class_id: See Microsoft documentation at the URL above
:param class_key: See Microsoft documentation at the URL above
:param hot_key: See Microsoft documentation at the URL above
:param monitor: See Microsoft documentation at the URL above
:param window: See Microsoft documentation at the URL above
:return: True if the file/application is successfully launched; otherwise, False
"""
def add_field(key: str, val: Any) -> None:
"""
It is an error to include a field with a value of None in the structure passed to ShellExecuteEx, so this
function only adds a key to the dictionary if the value is not None.
:param key: The name of the SHELLEXECUTEINFO field
:param val: The value of the field
"""
if val is not None:
shell_execute_info[key] = val
shell_execute_info = {}
add_field('fMask', mask)
add_field('hwnd', window)
add_field('lpVerb', verb)
add_field('lpFile', file_name)
add_field('lpParameters', params)
add_field('lpDirectory', dir_name)
add_field('nShow', show)
add_field('lpIDList', id_list)
add_field('lpClass', class_id)
add_field('hkeyClass', class_key)
add_field('dwHotKey', hot_key)
add_field('hMonitor', monitor)
return shell.ShellExecuteEx(**shell_execute_info)
else:
    def shell_start(*args, **kwargs) -> bool:  # pylint: disable=unused-argument
        """No-op stand-in for non-Windows platforms; always reports success."""
        return True
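# Illustrative sketch (not part of the original module, and not called anywhere):
# how shell_start might be used to launch a detached child process. The executable
# and log-file path below are hypothetical; keyword arguments are used so the call
# also matches the no-op stub on non-Windows platforms.
def _example_shell_start() -> bool:
    started = shell_start(file_name='notepad.exe', params=r'C:\temp\nydus.log')
    if not started:
        LOG.error("ShellExecuteEx failed to launch notepad.exe")
    return started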
def pfx(line):
return '| ' + line
def pfxs(lines):
return list(map(pfx, lines))
def trim_visible_output_to_lines(inp, out):
"""Converts input blob to lines... but not too many"""
if inp is not None:
out = out.replace(inp, '<>', 1)
outl = out.strip().split('\n')
if not outl:
return outl
if len(outl) == 1 and not outl[0].strip():
return [] # no single blank lines please
if len(outl) < 11:
return pfxs(outl)
return pfxs(outl[:5] + ['...elided %d lines...' % (len(outl) - 10)] + outl[-5:])
def mark_error_output(errOut):
"Converts error blob into lines with prefix marks"
se = errOut.strip()
if se:
for L in se.split('\n'):
yield 'ERR: ' + L
class EndsBuffer:
"""String buffer that maintains two segments: a first segment and a
last segment (i.e. the two ends), where each has a maximum size
    limit. Any extra output in between is discarded. The assumption is
    that if there is a huge amount of output, some of it must be discarded
    to avoid memory overflows, and that the beginning and end are generally
    the most relevant parts.
"""
def __init__(self, max_segment_size=None):
self._mss = ByteSize(max(0, max_segment_size or 0)) or DEFAULT_MAX_SEGMENT_SIZE
self._data = ''
self._elided = False
def add_str(self, data):
self._data += data
if len(self._data) > self._mss * 2:
self._data = self._data[:self._mss] + self._data[-int(self._mss):]
self._elided = True
def get_bufstr(self):
if self._elided:
return '\n'.join([
self._data[:self._mss],
'...[elided]...',
self._data[-int(self._mss):],
])
return self._data
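# Illustrative sketch (not part of the original module, and not called anywhere):
# EndsBuffer keeps only the head and tail of very large output streams. The chunk
# sizes below are arbitrary.
def _example_ends_buffer() -> str:
    buf = EndsBuffer()  # default 32 KiB head and tail segments
    for _ in range(10):
        buf.add_str('x' * 16384)  # 160 KiB total, well past the 64 KiB threshold
    # The middle of the stream has been dropped and is marked with '...[elided]...'.
    return buf.get_bufstr()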
def str_form(bytestr):
    """Best-effort conversion of raw subprocess output (bytes) to str."""
    try:
        return bytestr.decode('utf-8', errors='backslashreplace')
    except UnicodeDecodeError:
        try:
            import chardet  # pylint: disable=import-outside-toplevel
            try:
                return bytestr.decode(chardet.detect(bytestr)['encoding'])
            except UnicodeDecodeError:
                pass
        except ImportError:
            pass
        # Last resort: a lossy decode rather than returning None.
        return bytestr.decode('utf-8', errors='replace')
    except AttributeError:
        return bytestr  # already a string
class RunningCommand:
def __init__(self, popen_obj, tag, stdInput, errorOK, logger,
max_buffer_size=MAX_COMMAND_RESPONSE_BUFFER_SIZE):
self.p = popen_obj
self.tag = tag
self.stdInput = stdInput
        # Log at "error" level only when the command failed and errorOK is False;
        # otherwise log at "info".
        self.logger = (lambda r, _l=logger or LOG, e=errorOK:
                       getattr(_l, "error" if r and not e else "info"))
self._bufsize = max_buffer_size
self._output = EndsBuffer(self._bufsize / 2)
self._errors = EndsBuffer(self._bufsize / 2)
self._partial = False
self._completed = False
def checkIfCompleted(self):
self._output.add_str(str_form(self.p.stdout.read()))
self._errors.add_str(str_form(self.p.stderr.read()))
return self.p.poll() is not None
def outputToDate(self):
o = self._output.get_bufstr()
e = self._errors.get_bufstr()
self._output = EndsBuffer(self._bufsize / 2)
self._errors = EndsBuffer(self._bufsize / 2)
self._partial = True
return o, e
def waitForCompletion(self):
# n.b. communicate returns byte strings, in an unknown encoding, although
# with older python, they could also be strings
o, e = fmap(str_form, self.p.communicate())
self._output.add_str(o)
self._errors.add_str(e)
self._completed = True
return self
def getResult(self, omitOutput=False):
if not self._completed:
self.waitForCompletion()
outs, errs = self._output.get_bufstr(), self._errors.get_bufstr()
output_lines = trim_visible_output_to_lines(self.stdInput, outs)
if omitOutput:
output_lines = [line if line.startswith("<>") else "<>" for line in output_lines]
for line in (output_lines
+ list(mark_error_output(errs))
+ ["ExitCode: %d" % self.p.returncode]):
lstr = self.tag + ' ' + line
self.logger(self.p.returncode)(lstr)
# NOTE This is not a Nydus Result; it must be converted
return self.p.returncode, outs.strip(), errs.strip()
def startCommand(cmd, tag, useShell=False, stdInput=None, errorOK=False,
logger=None, omitString=None, env=None):
"""Starts the specified command; no Shell. The cmd is expected to be an
array of strings, although it is also possible to specify
simply a string if there are no arguments to the command.
The tag value is only used to annotate log lines for
distinctive tagging in the logging output.
If errorOK is True then a non-zero return code is still only
logged as "info" instead of "error". The ProcOpResult will
accurately reflect the result of the operation regardless of
the errorOK argument.
If omitString is specified, it is removed from any logging output.
The command is run asynchronously in a separate process. The
return value is a RunningCommand object that supports a
.checkIfCompleted() method to test for subprocess completion
(non-blocking) and a .getResult() to get the ProcOpResult of a
completed process (will block until completion if not already
completed).
"""
# if cmd is a string, leave as is to correctly obfuscate omitString
# If cmd is a bytestring, make sure omitString and the replacement text are also bytestrings
log_cmd = ' '.join(cmd) if isinstance(cmd, list) else cmd
replace_text = "<>"
if isinstance(log_cmd, bytes):
if omitString is not None and isinstance(omitString, str):
omitString = omitString.encode('utf-8')
replace_text = b'<>'
if omitString:
log_cmd = log_cmd.replace(omitString, replace_text)
if logger is None:
logger = LOG
logger.info(tag + " RUN: " + str(log_cmd)) # pylint: disable=logging-not-lazy
# pylint: disable=consider-using-with
p = subprocess.Popen(cmd,
shell=useShell,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
if stdInput:
p.stdin.write(stdInput.encode())
return RunningCommand(p, tag, stdInput, errorOK, logger)
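# Illustrative sketch (not part of the original module, and not called anywhere):
# starting a command asynchronously and polling it for completion. The command,
# tag, and poll interval are arbitrary examples.
def _example_start_command() -> RunCommandResult:
    import time  # pylint: disable=import-outside-toplevel
    running = startCommand(['sleep', '2'], 'EXAMPLE')
    while not running.checkIfCompleted():  # poll until the subprocess has exited
        time.sleep(0.5)
    return running.getResult()  # (exit_code, stdout, stderr)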
def runCommand(cmd, tag, useShell=False, stdInput=None, errorOK=False,
logger=None, omitString=None, omitOutput=False, env=None):
"""Runs the specified command and waits for completion; no Shell. The
cmd is expected to be an array of strings, although it is also
possible to specify simply a string if there are no arguments
to the command.
The tag value is only used to annotate log lines for
distinctive tagging in the logging output.
If errorOK is True then a non-zero return code is still only
logged as "info" instead of "error". The ProcOpResult will
accurately reflect the result of the operation regardless of
the errorOK argument.
If omitString is specified, it is removed from any logging output.
"""
return startCommand(cmd, tag,
useShell=useShell,
stdInput=stdInput,
errorOK=errorOK,
logger=logger,
omitString=omitString,
env=env) \
.waitForCompletion() \
.getResult(omitOutput=omitOutput)
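# Illustrative sketch (not part of the original module, and not called anywhere):
# a blocking invocation with a sensitive value scrubbed from the logged command line.
# The command and omitString values are hypothetical.
def _example_run_command() -> RunCommandResult:
    exit_code, stdout, stderr = runCommand(
        ['ls', '-l', '/var/secret'], 'EXAMPLE',
        omitString='/var/secret')  # logged as "ls -l <>"
    return exit_code, stdout, stderr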
def run_command_pipe(cmd, useShell=False):
"""Used to run commands that make use of | """
p = subprocess.Popen(cmd, shell=useShell, stdout=subprocess.PIPE)
with p:
o, e = p.communicate()
return p.returncode, o, e
def run_powershell(command: str, tag: str, quiet: bool = False) -> Tuple[int, str, str]:
"""Execute the code in ``command`` using PowerShell.
:param command: Code to execute
:param tag: Short string that will be added to logs to improve searchability.
:param quiet: Omit the output of the command from the result.
:return: Tuple containing (exit_code, stdout, stderr)
"""
# 1. Python 3.5 Windows: cannot use context manager - it does not unlink the file on exit.
# 2. Encode in UTF-8 with byte order mark (BOM) so PowerShell 5.1 (Windows 2016) sees the file is UTF-8. Without
# this, the file will be encoded with the default system encoding which can be changed by administrators and
# which, by default, cannot encode all Unicode characters properly. Encoding as UTF-8 without BOM does not work
# because standard output cannot be decoded as UTF-8.
# - Encoding PowerShell scripts:
# https://docs.microsoft.com/en-us/powershell/scripting/dev-cross-plat/
# vscode/understanding-file-encoding?view=powershell-7.1
# - utf-8-sig encoding: https://docs.python.org/3.5/library/codecs.html#encodings-and-unicode
# - Default encoding (locale.getpreferredencoding()): https://docs.python.org/3.5/library/functions.html#open
# pylint: disable=consider-using-with
temp = tempfile.NamedTemporaryFile(mode='w+t', encoding='utf-8-sig', suffix='.ps1', delete=False)
try:
temp.write(command)
temp.close()
if not quiet:
LOG.debug("Running PowerShell command: %s", command)
return run_powershell_file(temp.name, tag, quiet)
finally:
os.unlink(temp.name)
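# Illustrative sketch (not part of the original module, and not called anywhere):
# running a short PowerShell snippet on a Windows host. The snippet is only an example.
def _example_run_powershell() -> Tuple[int, str, str]:
    return run_powershell('Get-Date | Out-String', 'EXAMPLE')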
def run_shell_script_file(script_file: Union[str, Path], tag: str,
quiet: bool = False, script_file_args: List[str] = None,
stdin: str = None) -> Tuple[int, str, str]:
"""Invoke Shell script, passing just ``filename`` or ``filepath`` and a list of arguments for execution of the file
:param script_file: The path of the script file to execute
:param tag: Short string that will be added to logs to improve searchability.
:param quiet: Omit the output of the command from the result.
    :param script_file_args: List of string arguments to pass when executing the file
    :param stdin: String to pass to the script on standard input
:return: Tuple containing (exit_code, stdout, stderr)
"""
command = ['bash', str(script_file)]
if script_file_args:
for arg in script_file_args:
command.append(str(arg))
return runCommand(command, tag, stdInput=stdin, omitOutput=quiet)
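# Illustrative sketch (not part of the original module, and not called anywhere):
# running a bash script with arguments and data on stdin. The path, arguments, and
# stdin text are hypothetical.
def _example_run_shell_script_file() -> Tuple[int, str, str]:
    return run_shell_script_file('/opt/scripts/setup.sh', 'EXAMPLE',
                                 script_file_args=['--verbose', '42'],
                                 stdin='yes\n')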
def run_powershell_file(script_file: Union[str, Path], tag: str,
quiet: bool = False, script_file_args: List[str] = None,
stdin: str = None) -> Tuple[int, str, str]:
"""Invoke PowerShell, passing just ``filename`` or ``filename`` and a list of arguments for execution of the file
:param script_file: The path of the script file to execute
:param tag: Short string that will be added to logs to improve searchability.
:param quiet: Omit the output of the command from the result.
    :param script_file_args: List of string arguments to pass when executing the file
    :param stdin: String to pass to the script on standard input
:return: Tuple containing (exit_code, stdout, stderr)
"""
command = ['powershell.exe', '-ExecutionPolicy', 'Unrestricted', '-File', str(script_file)]
if script_file_args:
for arg in script_file_args:
command.append(str(arg))
return runCommand(command, tag, stdInput=stdin, omitOutput=quiet)
def run_uapi_command(cmd_list: Union[str, List[str]], description: str, op_name: str,
                     use_shell: bool = False, omit_string: str = None) -> Tuple[int, str, str]:
"""Run a command on the target machine
:param cmd_list: string or list of strings that represents a command to be executed
:param description: short description of what the command is doing
:param op_name: name of the operation that is running the command
:param use_shell: indicator that the command should be executed in a shell
:param omit_string: a string to be removed from any logging output.
:return: Tuple containing (exit_code, stdout, stderr)
"""
cmd_list_str = str(cmd_list)
if omit_string:
cmd_list_str = cmd_list_str.replace(omit_string, "<>")
LOG.debug("run_uapi_command %s cmd_list: %s", op_name, cmd_list_str)
exit_code, outs, errs = runCommand(cmd_list, description, useShell=use_shell, omitString=omit_string)
LOG.debug("%s_result- %s - %s - %s", description, exit_code, outs, errs)
return exit_code, outs, errs
def run_multiple_uapi_commands(cmds_list: List[Tuple[str, Union[str, List[str]]]], op_name: str,
                               use_shell: bool = False, omit_string: str = None) -> Tuple[int, str, str]:
    """Run multiple commands on the target machine, stopping at the first failure
:param cmds_list: List of tuples with description-command pairs
:param op_name: name of the operation that is running the command
:param use_shell: indicator that the command should be executed in a shell
:param omit_string: a string to be removed from any logging output.
:return: Tuple containing (exit_code, stdout, stderr)
"""
exit_code, outs, errs = 0, '', ''
for description, cmd in cmds_list:
exit_code, outs, errs = run_uapi_command(cmd, description, op_name, use_shell=use_shell,
omit_string=omit_string)
if exit_code != 0:
return exit_code, outs, errs
return exit_code, outs, errs
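# Illustrative sketch (not part of the original module, and not called anywhere):
# chaining several commands and stopping at the first non-zero exit code. The
# descriptions and commands are hypothetical.
def _example_run_multiple_uapi_commands() -> Tuple[int, str, str]:
    cmds = [
        ('stop service', ['systemctl', 'stop', 'example.service']),
        ('install config', ['cp', '/tmp/example.conf', '/etc/example.conf']),
        ('start service', ['systemctl', 'start', 'example.service']),
    ]
    return run_multiple_uapi_commands(cmds, 'example_op')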
def start_powershell_file(script_file: Union[str, Path]) -> Tuple[int, str, str]:
"""
Invoke PowerShell, passing just ``filename`` or ``filename`` and a list of arguments for execution of the file.
Do not wait for the sub-process to complete. The exit code returned in the tuple only indicates whether or
not the process was successfully started.
:param script_file: The path of the script file to execute
:param tag: Short string that will be added to logs to improve searchability.
:param quiet: Omit the output of the command from the result.
:param script_file_args: List of string arguments if required while execution of the file
:return: Tuple containing (exit_code, stdout, stderr)
"""
params = list2cmdline(['-ExecutionPolicy', 'Unrestricted', '-File', str(script_file)])
LOG.info("Staring PowerShell: powershell.exe %s", params)
code = 0 if shell_start('powershell.exe', params=params) else 1
return code, '', ''
def start_powershell(command: str) -> Tuple[int, str, str]:
"""Execute the code in ``command`` using PowerShell.
:param command: Code to execute
:param tag: Short string that will be added to logs to improve searchability.
:param quiet: Omit the output of the command from the result.
:return: Tuple containing (exit_code, stdout, stderr)
"""
# Because the process started by this command could (and probably will) survive after this process has exited,
# we can't delete the temporary file, here.
# pylint: disable=consider-using-with
temp = tempfile.NamedTemporaryFile(mode='w+t', suffix='.ps1', delete=False)
temp.write(command)
temp.close()
LOG.debug("Running Powershell command: %s", command)
return start_powershell_file(temp.name)
# helpers.py
# -*- coding: utf-8 -*-
"""\
© Copyright. All rights reserved.
"""
from __future__ import unicode_literals
from fileinput import FileInput
from functools import reduce
import sys
class HandledFailureException(Exception):
pass
def edit_file_lines(filename, editfunc):
"""Iterates through the lines of the named file, calling the editfunc
for each line, replacing the original file with the new
output.
"""
with FileInput(files=(filename,), inplace=True) as f:
for line in f:
sys.stdout.write(editfunc(line))
def replace_file_lines_multiple(filename, replace_dict, firstword=True):
"""Iterates through the lines of the named file, and through a
dictionary of match -> replace pairs, replacing all that apply for each line,
and replacing the original file with the new output.
"""
with FileInput(files=(filename,), inplace=True) as f:
for line in f:
updated_line = line
for match, replace in replace_dict.items():
updated_line = replace_line(updated_line, match, replace, firstword)
sys.stdout.write(updated_line)
def replace_line(line, match, replace, firstword=True):
"""Checks `line` to see if it starts with `match`; if yes, return
`replace`, otherwise return the original `line`. If
`firstword` is True, then the match must be followed by at
least one space or tab (unless it matches the entire line),
otherwise it is matched directly.
Suitable for use as an editfunc with `edit_file_lines`.
"""
# pylint: disable=too-many-boolean-expressions
if (
line.strip() == match
or (firstword and (line.startswith(match+' ') or line.startswith(match+'\t')))
or (not firstword and line.startswith(match))
):
return replace
return line
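# Illustrative sketch (not part of the original module, and not called anywhere):
# rewriting "keyword value" style configuration lines in place. The file name and
# settings are hypothetical; each replacement must carry its own trailing newline.
def _example_replace_file_lines_multiple():
    replace_file_lines_multiple('/etc/example/daemon.conf', {
        'PermitRootLogin': 'PermitRootLogin no\n',
        'PasswordAuthentication': 'PasswordAuthentication no\n',
    })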
def append_line(filename, line):
"""Appends a line to to the specified file.
"""
with open(filename, 'a', encoding='UTF-8') as file:
file.write('\n')
file.write(line)
def create_file(filename, contents):
"""Creates the specified file, with the specified contents.
Overwrites any file already existing at that location.
"""
with open(filename, 'w', encoding='UTF-8') as wf:
wf.write(contents)
def compose(*funcs):
return reduce(lambda f, g: lambda x: f(g(x)), funcs, lambda x: x)
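# Illustrative sketch (not part of the original module, and not called anywhere):
# compose() builds a single editfunc from several line editors; functions are applied
# right to left, so the last one listed runs first. The file name is hypothetical.
def _example_compose():
    def drop_comments(line):
        return '' if line.lstrip().startswith('#') else line
    def tabs_to_spaces(line):
        return line.replace('\t', '    ')
    # tabs_to_spaces runs first on each line, then drop_comments
    edit_file_lines('/etc/example/daemon.conf', compose(drop_comments, tabs_to_spaces))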
# retry.py
from __future__ import absolute_import
import time
import logging
from collections import namedtuple
from itertools import takewhile
import email
import re
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
InvalidHeader,
)
from ..packages import six
log = logging.getLogger(__name__)
# Data structure for representing the metadata of requests that result in a retry.
RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
"status", "redirect_location"])
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int status:
How many times to retry on bad status codes.
These are retries made on responses, where status code matches
``status_forcelist``.
Set to ``0`` to fail on the first retry of this type.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
Set to a ``False`` value to retry on any verb.
:param iterable status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``method_whitelist``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param iterable remove_headers_on_redirect:
Sequence of headers to remove from the request when a response
indicating a redirect is returned before firing off the redirected
request
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(['Authorization'])
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
history=None, respect_retry_after_header=True,
remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST):
self.total = total
self.connect = connect
self.read = read
self.status = status
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self.history = history or tuple()
self.respect_retry_after_header = respect_retry_after_header
self.remove_headers_on_redirect = remove_headers_on_redirect
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
history=self.history,
remove_headers_on_redirect=self.remove_headers_on_redirect,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
# We want to consider only the last consecutive errors sequence (Ignore redirects).
consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
reversed(self.history))))
if consecutive_errors_len <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
return min(self.BACKOFF_MAX, backoff_value)
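    # Worked example (added for clarity; not in upstream urllib3): with
    # backoff_factor=0.1 and 2, 3, 4 consecutive non-redirect errors in the history,
    # this returns 0.1 * 2**1 = 0.2s, 0.1 * 2**2 = 0.4s, 0.1 * 2**3 = 0.8s; the first
    # retry (consecutive_errors_len <= 1) sleeps 0s, and values are capped at
    # BACKOFF_MAX (120s).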
def parse_retry_after(self, retry_after):
# Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
if re.match(r"^\s*[0-9]+\s*$", retry_after):
seconds = int(retry_after)
else:
retry_date_tuple = email.utils.parsedate(retry_after)
if retry_date_tuple is None:
raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
retry_date = time.mktime(retry_date_tuple)
seconds = retry_date - time.time()
if seconds < 0:
seconds = 0
return seconds
def get_retry_after(self, response):
""" Get the value of Retry-After in seconds. """
retry_after = response.getheader("Retry-After")
if retry_after is None:
return None
return self.parse_retry_after(retry_after)
def sleep_for_retry(self, response=None):
retry_after = self.get_retry_after(response)
if retry_after:
time.sleep(retry_after)
return True
return False
def _sleep_backoff(self):
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def sleep(self, response=None):
""" Sleep between retry attempts.
This method will respect a server's ``Retry-After`` response header
and sleep the duration of the time requested. If that is not present, it
will use an exponential backoff. By default, the backoff factor is 0 and
this method will return immediately.
"""
if response:
slept = self.sleep_for_retry(response)
if slept:
return
self._sleep_backoff()
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def _is_method_retryable(self, method):
""" Checks if a given HTTP method should be retried upon, depending if
it is included on the method whitelist.
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return True
def is_retry(self, method, status_code, has_retry_after=False):
""" Is this method/status code retryable? (Based on whitelists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
be retried upon on the presence of the aforementioned header)
"""
if not self._is_method_retryable(method):
return False
if self.status_forcelist and status_code in self.status_forcelist:
return True
return (self.total and self.respect_retry_after_header and
has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None,
_pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
connect = self.connect
read = self.read
redirect = self.redirect
status_count = self.status
cause = 'unknown'
status = None
redirect_location = None
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
elif error and self._is_read_error(error):
# Read retry?
if read is False or not self._is_method_retryable(method):
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
redirect_location = response.get_redirect_location()
status = response.status
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
cause = ResponseError.GENERIC_ERROR
if response and response.status:
if status_count is not None:
status_count -= 1
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
status = response.status
history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect, status=status_count,
history=history)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect}, status={self.status})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
# __pycache__/__init__.cpython-38.pyc
# (Compiled bytecode omitted. The cached customer_local_ops/util/__init__.py module
# defines b64str(), fmap(), and random_password().)