rm tf.contrib #35

Open · wants to merge 2 commits into master
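Every module that previously reached into tf.contrib now rebinds tf to the TF1 compatibility API right after the import. A minimal sketch of the pattern, assuming the code is run on a TensorFlow 2.x install (the disable_v2_behavior() call is an assumption about how the scripts would be launched, not part of this diff):

import tensorflow as tf
tf = tf.compat.v1          # route tf.* calls through the TF1-compatible API surface
tf.disable_v2_behavior()   # assumed: keep graph-mode/TF1 semantics on TensorFlow 2.x

Note that tf.compat.v1 does not re-export tf.contrib (contrib was removed outright in TensorFlow 2.x), which is why the contrib-based memory statistics further down are commented out rather than rewritten.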
1 change: 1 addition & 0 deletions dataset_tool.py
@@ -16,6 +16,7 @@
 import traceback
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
 import PIL.Image
 import dnnlib.tflib as tflib
 
2 changes: 2 additions & 0 deletions dnnlib/tflib/autosummary.py
@@ -24,6 +24,8 @@
 from collections import OrderedDict
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 from tensorboard import summary as summary_lib
 from tensorboard.plugins.custom_scalar import layout_pb2
 
2 changes: 2 additions & 0 deletions dnnlib/tflib/custom_ops.py
@@ -14,6 +14,8 @@
 import tempfile
 import shutil
 import tensorflow as tf
+tf = tf.compat.v1
+
 from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module
 
 #----------------------------------------------------------------------------
2 changes: 2 additions & 0 deletions dnnlib/tflib/network.py
@@ -13,6 +13,8 @@
 import sys
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 
 from collections import OrderedDict
 from typing import Any, List, Tuple, Union
2 changes: 2 additions & 0 deletions dnnlib/tflib/ops/fused_bias_act.py
@@ -9,6 +9,8 @@
 import os
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 from .. import custom_ops
 from ...util import EasyDict
 
2 changes: 2 additions & 0 deletions dnnlib/tflib/ops/upfirdn_2d.py
@@ -9,6 +9,8 @@
 import os
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 from .. import custom_ops
 
 def _get_plugin():
16 changes: 9 additions & 7 deletions dnnlib/tflib/optimizer.py
@@ -9,6 +9,8 @@
 import platform
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 
 from collections import OrderedDict
 from typing import List, Union
@@ -131,13 +133,13 @@ def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dic
 
         # Report memory usage if requested.
         deps = []
-        if self._report_mem_usage:
-            self._report_mem_usage = False
-            try:
-                with tf.name_scope(self.id + '_mem'), tf.device(device.name), tf.control_dependencies([loss]):
-                    deps.append(autosummary.autosummary(self.id + "/mem_usage_gb", tf.contrib.memory_stats.BytesInUse() / 2**30))
-            except tf.errors.NotFoundError:
-                pass
+        # if self._report_mem_usage:
+        # self._report_mem_usage = False
+        # try:
+        # with tf.name_scope(self.id + '_mem'), tf.device(device.name), tf.control_dependencies([loss]):
+        # deps.append(autosummary.autosummary(self.id + "/mem_usage_gb", tf.contrib.memory_stats.BytesInUse() / 2**30))
+        # except tf.errors.NotFoundError:
+        # pass
 
         # Compute gradients.
         with tf.name_scope(self.id + "_grad"), tf.device(device.name), tf.control_dependencies(deps):
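The per-device memory autosummary above is disabled rather than ported. If the statistic is still wanted, one possible TF 2.x substitute is sketched below, assuming TensorFlow >= 2.5 (where tf.config.experimental.get_memory_info exists); current_gpu_mem_gb is a hypothetical helper, not part of this PR, and unlike tf.contrib.memory_stats.BytesInUse() it is a Python-side query rather than a graph op, so it cannot feed tf.control_dependencies:

import tensorflow as tf

def current_gpu_mem_gb(device='GPU:0'):
    # Bytes currently held by the TensorFlow allocator on the device, in GiB.
    try:
        return tf.config.experimental.get_memory_info(device)['current'] / 2**30
    except ValueError:  # unknown device, or memory stats not supported for it
        return 0.0

The value would have to be reported outside the gradient graph, for example printed once per tick next to the other training statistics.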
5 changes: 3 additions & 2 deletions dnnlib/tflib/tfutil.py
@@ -9,12 +9,13 @@
 import os
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
 
 # Silence deprecation warnings from TensorFlow 1.13 onwards
 import logging
 logging.getLogger('tensorflow').setLevel(logging.ERROR)
-import tensorflow.contrib # requires TensorFlow 1.x!
-tf.contrib = tensorflow.contrib
+# import tensorflow.contrib # requires TensorFlow 1.x!
+# tf.contrib = tensorflow.contrib
 
 from typing import Any, Iterable, List, Union
 
2 changes: 2 additions & 0 deletions metrics/frechet_inception_distance.py
@@ -10,6 +10,8 @@
 import numpy as np
 import scipy
 import tensorflow as tf
+tf = tf.compat.v1
+
 import dnnlib.tflib as tflib
 
 from metrics import metric_base
2 changes: 2 additions & 0 deletions metrics/inception_score.py
@@ -8,6 +8,8 @@
 
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 import dnnlib.tflib as tflib
 
 from metrics import metric_base
2 changes: 2 additions & 0 deletions metrics/linear_separability.py
@@ -10,6 +10,8 @@
 import numpy as np
 import sklearn.svm
 import tensorflow as tf
+tf = tf.compat.v1
+
 import dnnlib.tflib as tflib
 
 from metrics import metric_base
2 changes: 2 additions & 0 deletions metrics/metric_base.py
@@ -11,6 +11,8 @@
 import hashlib
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 import dnnlib
 import dnnlib.tflib as tflib
 
2 changes: 2 additions & 0 deletions metrics/perceptual_path_length.py
@@ -8,6 +8,8 @@
 
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 import dnnlib.tflib as tflib
 
 from metrics import metric_base
2 changes: 2 additions & 0 deletions metrics/precision_recall.py
@@ -9,6 +9,8 @@
 import os
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 import dnnlib
 import dnnlib.tflib as tflib
 
2 changes: 2 additions & 0 deletions projector.py
@@ -6,6 +6,8 @@
 
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 import dnnlib
 import dnnlib.tflib as tflib
 
2 changes: 2 additions & 0 deletions training/dataset.py
@@ -10,6 +10,8 @@
 import glob
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 import dnnlib
 import dnnlib.tflib as tflib
 
2 changes: 2 additions & 0 deletions training/loss.py
@@ -8,6 +8,8 @@
 
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 import dnnlib.tflib as tflib
 from dnnlib.tflib.autosummary import autosummary
 
2 changes: 2 additions & 0 deletions training/networks_stylegan.py
@@ -8,6 +8,8 @@
 
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 import dnnlib
 import dnnlib.tflib as tflib
 
2 changes: 2 additions & 0 deletions training/networks_stylegan2.py
@@ -8,6 +8,8 @@
 
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
+
 import dnnlib
 import dnnlib.tflib as tflib
 from dnnlib.tflib.ops.upfirdn_2d import upsample_2d, downsample_2d, upsample_conv_2d, conv_downsample_2d
13 changes: 7 additions & 6 deletions training/training_loop.py
@@ -8,6 +8,7 @@
 
 import numpy as np
 import tensorflow as tf
+tf = tf.compat.v1
 import dnnlib
 import dnnlib.tflib as tflib
 from dnnlib.tflib.autosummary import autosummary
@@ -240,11 +241,11 @@ def training_loop(
     Gs_update_op = Gs.setup_as_moving_average_of(G, beta=Gs_beta)
 
     # Finalize graph.
-    with tf.device('/gpu:0'):
-        try:
-            peak_gpu_mem_op = tf.contrib.memory_stats.MaxBytesInUse()
-        except tf.errors.NotFoundError:
-            peak_gpu_mem_op = tf.constant(0)
+    # with tf.device('/gpu:0'):
+    # try:
+    # peak_gpu_mem_op = tf.contrib.memory_stats.MaxBytesInUse()
+    # except tf.errors.NotFoundError:
+    # peak_gpu_mem_op = tf.constant(0)
     tflib.init_uninitialized_vars()
 
     print('Initializing logs...')
@@ -327,7 +328,7 @@ def training_loop(
            autosummary('Timing/sec_per_tick', tick_time),
            autosummary('Timing/sec_per_kimg', tick_time / tick_kimg),
-            autosummary('Timing/maintenance_sec', maintenance_time),
-            autosummary('Resources/peak_gpu_mem_gb', peak_gpu_mem_op.eval() / 2**30)))
+            autosummary('Timing/maintenance_sec', maintenance_time)))
+            # autosummary('Resources/peak_gpu_mem_gb', peak_gpu_mem_op.eval() / 2**30)
         autosummary('Timing/total_hours', total_time / (60.0 * 60.0))
         autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0))
 
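With MaxBytesInUse() gone, the peak-GPU-memory column drops out of the tick report. A rough TF 2.x stand-in is sketched below, again assuming TensorFlow >= 2.5; peak_gpu_mem_gb is a hypothetical helper, not part of this PR:

import tensorflow as tf

def peak_gpu_mem_gb(device='GPU:0'):
    # Peak bytes allocated on the device since startup (or since the last
    # tf.config.experimental.reset_memory_stats call), converted to GiB.
    try:
        return tf.config.experimental.get_memory_info(device)['peak'] / 2**30
    except ValueError:  # unknown device, or memory stats not supported for it
        return 0.0

If the peak_gpu_mem_op argument simply stays removed, any gpumem field in the enclosing print format string (not visible in this hunk) would also need to be dropped.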