This repository has been archived by the owner on Apr 19, 2022. It is now read-only.

Change #L -> long(#) for Py 2 & 3 compatibility #239

Closed · wants to merge 1 commit
snakebite/channel.py: 15 changes (9 additions, 6 deletions)
@@ -42,6 +42,10 @@
 import socket
 import os
 import math
+import sys
+
+if sys.version_info[0] == 3:
+    long = int
 
 # Third party imports
 from google.protobuf.service import RpcChannel
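For context, a minimal standalone sketch (not part of the diff) of why the shim works: Python 3 has no separate long type, and its int is already arbitrary precision, so aliasing long = int lets Py2-style long(...) calls run unchanged on either interpreter.

    import sys

    # Python 3 dropped the separate long type; int is arbitrary precision.
    # The alias lets Py2-style long(...) calls run unchanged on Py3.
    if sys.version_info[0] == 3:
        long = int

    print(long(18446744073709551615))  # same value on Py2 and Py3
    print(long(1) << 64)               # arbitrary precision either way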
@@ -61,7 +65,6 @@
 import google.protobuf.internal.decoder as decoder
 
 # Module imports
-
 import logger
 import logging
 import struct
@@ -162,7 +165,7 @@ def buffer_length(self):
 
 
 class SocketRpcChannel(RpcChannel):
-    ERROR_BYTES = 18446744073709551615L
+    ERROR_BYTES = long(18446744073709551615)
     RPC_HEADER = "hrpc"
     RPC_SERVICE_CLASS = 0x00
     AUTH_PROTOCOL_NONE = 0x00
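An aside on the constant (illustration only, not from the diff): 18446744073709551615 is 2**64 - 1, the all-ones unsigned 64-bit word, and the old L suffix is a SyntaxError on Python 3, which is why the literal form had to change. A quick check, assuming the same shim as above:

    import struct
    import sys

    if sys.version_info[0] == 3:
        long = int  # same shim as in the diff above

    ERROR_BYTES = long(18446744073709551615)

    # All-ones unsigned 64-bit value:
    assert ERROR_BYTES == 2 ** 64 - 1 == 0xFFFFFFFFFFFFFFFF
    # Equivalently, signed 64-bit -1 reinterpreted as unsigned:
    assert struct.unpack("!Q", struct.pack("!q", -1))[0] == ERROR_BYTES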
@@ -191,7 +194,7 @@ def __init__(self, host, port, version, effective_user=None, use_sasl=False, hdf
 
             kerberos = Kerberos()
             self.effective_user = effective_user or kerberos.user_principal().name
-        else:
+        else:
             self.effective_user = effective_user or get_current_username()
         self.sock_connect_timeout = sock_connect_timeout
         self.sock_request_timeout = sock_request_timeout
@@ -261,7 +264,7 @@ def get_connection(self, host, port):
 
         self.write_delimited(rpc_header)
         self.write_delimited(context)
-
+
     def write(self, data):
         if log.getEffectiveLevel() == logging.DEBUG:
            log.debug("Sending: %s", format_bytes(data))
@@ -624,7 +627,7 @@ def readBlock(self, length, pool_id, block_id, generation_stamp, offset, block_t
         else:
             self._read_bytes(checksum_len * chunks_per_packet)
 
-        # We use a fixed size buffer (a "load") to read only a couple of chunks at once.
+        # We use a fixed size buffer (a "load") to read only a couple of chunks at once.
         bytes_per_load = self.LOAD_SIZE - (self.LOAD_SIZE % bytes_per_chunk)
         chunks_per_load = int(bytes_per_load / bytes_per_chunk)
         loads_per_packet = int(math.ceil(bytes_per_chunk * chunks_per_packet / bytes_per_load))
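A small worked example of the load arithmetic above, with assumed values (the real LOAD_SIZE and chunk geometry come from the channel and the DataNode packet headers): rounding the buffer down to a whole number of chunks guarantees a load never ends mid-chunk.

    import math

    # Hypothetical values, for illustration only:
    LOAD_SIZE = 1000000      # assumed fixed buffer size in bytes
    bytes_per_chunk = 512    # assumed checksum chunk size
    chunks_per_packet = 128  # assumed packet geometry

    # Round the buffer down to a whole number of chunks:
    bytes_per_load = LOAD_SIZE - (LOAD_SIZE % bytes_per_chunk)  # 999936
    chunks_per_load = int(bytes_per_load / bytes_per_chunk)     # 1953
    # float() forces true division on both Python versions:
    loads_per_packet = int(math.ceil(
        float(bytes_per_chunk * chunks_per_packet) / bytes_per_load))  # 1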
@@ -645,7 +648,7 @@ def readBlock(self, length, pool_id, block_id, generation_stamp, offset, block_t
                 total_read += len(chunk)
                 read_on_packet += len(chunk)
             yield load
-
+
         # Send ClientReadStatusProto message confirming successful read
         request = ClientReadStatusProto()
         request.status = 0 # SUCCESS
snakebite/client.py: 2 changes (1 addition, 1 deletion)
@@ -1470,7 +1470,7 @@ def _switch_namenode(self, namenodes):
 
     def __calculate_exponential_time(self, time, retries, cap):
         # Same calculation as the original Hadoop client but converted to seconds
-        baseTime = min(time * (1L << retries), cap);
+        baseTime = min(time * (long(1) << retries), cap);
         return (baseTime * (random.random() + 0.5)) / 1000;
 
     def __do_retry_sleep(self, retries):
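For context, a standalone sketch of the backoff formula (hypothetical function name, not the private method itself). The only reason this line changed is that the 1L literal is a SyntaxError on Python 3, while long(1) << retries parses on both; the arithmetic is untouched: double the base wait per retry, clamp at the cap, jitter by a factor in [0.5, 1.5), and convert milliseconds to seconds.

    import random

    def calculate_exponential_time(time_ms, retries, cap_ms):
        # Hypothetical standalone version of __calculate_exponential_time.
        # Base wait doubles each retry, clamped at cap_ms.
        base_time = min(time_ms * (1 << retries), cap_ms)
        # Jitter by a random factor in [0.5, 1.5), then ms -> seconds.
        return (base_time * (random.random() + 0.5)) / 1000

    # With time_ms=1000 and cap_ms=15000, retries 0..3 give bases of
    # 1000, 2000, 4000, 8000 ms before jitter:
    for r in range(4):
        print(round(calculate_exponential_time(1000, r, 15000), 3))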