Revert "tools: update closure_linter to the latest(2.3.5)"

This reverts commit 6d98524609.
This reverts commit 60ff789618.

closure_linter now depends on the gflags module and not everyone will have that
installed by default.
This commit is contained in:
Ben Noordhuis 2012-06-28 04:14:24 +02:00
parent e7e34ddb66
commit 2d0011f532
42 changed files with 3441 additions and 3188 deletions

37
LICENSE
View File

@ -299,22 +299,37 @@ maintained libraries. The externally maintained libraries used by Node are:
IN THE SOFTWARE.
"""
- Closure Linter is located at tools/closure_linter. Closure Linter's license
- Closure Linter is located at tools/closure_linter. Closure's license
follows:
"""
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
# Copyright (c) 2007, Google Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# http://www.apache.org/licenses/LICENSE-2.0
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
- tools/cpplint.py is a C++ linter. Its license follows:

View File

@ -1,6 +1,6 @@
Metadata-Version: 1.0
Name: closure_linter
Version: 2.3.5
Version: 2.2.6
Summary: Closure Linter
Home-page: http://code.google.com/p/closure-linter
Author: The Closure Linter Authors

View File

@ -0,0 +1,10 @@
Metadata-Version: 1.0
Name: closure-linter
Version: 2.2.6
Summary: Closure Linter
Home-page: http://code.google.com/p/closure-linter
Author: The Closure Linter Authors
Author-email: opensource@google.com
License: Apache
Description: UNKNOWN
Platform: UNKNOWN

View File

@ -0,0 +1,41 @@
README
setup.py
closure_linter/__init__.py
closure_linter/checker.py
closure_linter/checkerbase.py
closure_linter/ecmalintrules.py
closure_linter/ecmametadatapass.py
closure_linter/error_fixer.py
closure_linter/errorrules.py
closure_linter/errors.py
closure_linter/fixjsstyle.py
closure_linter/fixjsstyle_test.py
closure_linter/full_test.py
closure_linter/gjslint.py
closure_linter/indentation.py
closure_linter/javascriptlintrules.py
closure_linter/javascriptstatetracker.py
closure_linter/javascriptstatetracker_test.py
closure_linter/javascripttokenizer.py
closure_linter/javascripttokens.py
closure_linter/statetracker.py
closure_linter/tokenutil.py
closure_linter.egg-info/PKG-INFO
closure_linter.egg-info/SOURCES.txt
closure_linter.egg-info/dependency_links.txt
closure_linter.egg-info/entry_points.txt
closure_linter.egg-info/requires.txt
closure_linter.egg-info/top_level.txt
closure_linter/common/__init__.py
closure_linter/common/error.py
closure_linter/common/erroraccumulator.py
closure_linter/common/errorhandler.py
closure_linter/common/errorprinter.py
closure_linter/common/filetestcase.py
closure_linter/common/htmlutil.py
closure_linter/common/lintrunner.py
closure_linter/common/matcher.py
closure_linter/common/position.py
closure_linter/common/simplefileflags.py
closure_linter/common/tokenizer.py
closure_linter/common/tokens.py

View File

@ -0,0 +1,4 @@
[console_scripts]
fixjsstyle = closure_linter.fixjsstyle:main
gjslint = closure_linter.gjslint:main

View File

@ -0,0 +1 @@
python-gflags

View File

@ -0,0 +1 @@
closure_linter

View File

@ -1,16 +1 @@
#!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package indicator for gjslint."""

View File

@ -22,10 +22,11 @@ __author__ = ('robbyw@google.com (Robert Walker)',
import gflags as flags
from closure_linter import checkerbase
from closure_linter import closurizednamespacesinfo
from closure_linter import ecmametadatapass
from closure_linter import errors
from closure_linter import javascriptlintrules
from closure_linter import javascriptstatetracker
from closure_linter.common import errorprinter
from closure_linter.common import lintrunner
flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
@ -33,12 +34,6 @@ flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
'report errors for missing documentation, some missing '
'descriptions, or methods whose @return tags don\'t have a '
'matching return statement.')
flags.DEFINE_list('closurized_namespaces', '',
'Namespace prefixes, used for testing of'
'goog.provide/require')
flags.DEFINE_list('ignored_extra_namespaces', '',
'Fully qualified namespaces that should be not be reported '
'as extra by the linter.')
class JavaScriptStyleChecker(checkerbase.CheckerBase):
@ -48,83 +43,40 @@ class JavaScriptStyleChecker(checkerbase.CheckerBase):
"""Initialize an JavaScriptStyleChecker object.
Args:
error_handler: Error handler to pass all errors to.
error_handler: Error handler to pass all errors to
"""
self._namespaces_info = None
if flags.FLAGS.closurized_namespaces:
self._namespaces_info = (
closurizednamespacesinfo.ClosurizedNamespacesInfo(
flags.FLAGS.closurized_namespaces,
flags.FLAGS.ignored_extra_namespaces))
checkerbase.CheckerBase.__init__(
self,
error_handler=error_handler,
lint_rules=javascriptlintrules.JavaScriptLintRules(
self._namespaces_info),
state_tracker=javascriptstatetracker.JavaScriptStateTracker(),
lint_rules=javascriptlintrules.JavaScriptLintRules(),
state_tracker=javascriptstatetracker.JavaScriptStateTracker(
closurized_namespaces=flags.FLAGS.closurized_namespaces),
metadata_pass=ecmametadatapass.EcmaMetaDataPass(),
limited_doc_files=flags.FLAGS.limited_doc_files)
def _CheckTokens(self, token, parse_error, debug_tokens):
"""Checks a token stream for lint warnings/errors.
Adds a separate pass for computing dependency information based on
goog.require and goog.provide statements prior to the main linting pass.
Args:
token: The first token in the token stream.
parse_error: A ParseError if any errors occurred.
debug_tokens: Whether every token should be printed as it is encountered
during the pass.
Returns:
A boolean indicating whether the full token stream could be checked or if
checking failed prematurely.
"""
# To maximize the amount of errors that get reported before a parse error
# is displayed, don't run the dependency pass if a parse error exists.
if self._namespaces_info and not parse_error:
self._namespaces_info.Reset()
result = (self._ExecutePass(token, self._DependencyPass) and
self._ExecutePass(token, self._LintPass,
debug_tokens=debug_tokens))
else:
result = self._ExecutePass(token, self._LintPass, parse_error,
debug_tokens)
if not result:
return False
self._lint_rules.Finalize(self._state_tracker, self._tokenizer.mode)
self._error_handler.FinishFile()
return True
def _DependencyPass(self, token):
"""Processes an invidual token for dependency information.
Used to encapsulate the logic needed to process an individual token so that
it can be passed to _ExecutePass.
Args:
token: The token to process.
"""
self._namespaces_info.ProcessToken(token, self._state_tracker)
class GJsLintRunner(lintrunner.LintRunner):
"""Wrapper class to run GJsLint."""
def Run(self, filenames, error_handler):
def Run(self, filenames, error_handler=None):
"""Run GJsLint on the given filenames.
Args:
filenames: The filenames to check
error_handler: An ErrorHandler object.
error_handler: An optional ErrorHandler object, an ErrorPrinter is used if
none is specified.
Returns:
error_count, file_count: The number of errors and the number of files that
contain errors.
"""
if not error_handler:
error_handler = errorprinter.ErrorPrinter(errors.NEW_ERRORS)
checker = JavaScriptStyleChecker(error_handler)
# Check the list of files.
for filename in filenames:
checker.Check(filename)
return error_handler

View File

@ -20,7 +20,6 @@ __author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import StringIO
import traceback
import gflags as flags
@ -38,7 +37,6 @@ flags.DEFINE_boolean('debug_tokens', False,
flags.DEFINE_boolean('error_trace', False,
'Whether to show error exceptions.')
class LintRulesBase(object):
"""Base class for all classes defining the lint rules for a language."""
@ -63,14 +61,6 @@ class LintRulesBase(object):
if errorrules.ShouldReportError(code):
self.__checker.HandleError(code, message, token, position, fix_data)
def _SetLimitedDocChecks(self, limited_doc_checks):
"""Sets whether doc checking is relaxed for this file.
Args:
limited_doc_checks: Whether doc checking is relaxed for this file.
"""
self._limited_doc_checks = limited_doc_checks
def CheckToken(self, token, parser_state):
"""Checks a token, given the current parser_state, for warnings and errors.
@ -112,17 +102,13 @@ class CheckerBase(object):
documentation comments.
metadata_pass: Object that builds metadata about the token stream.
"""
self._error_handler = error_handler
self._lint_rules = lint_rules
self._state_tracker = state_tracker
self._metadata_pass = metadata_pass
self._limited_doc_files = limited_doc_files
# TODO(user): Factor out. A checker does not need to know about the
# tokenizer, only the token stream.
self._tokenizer = javascripttokenizer.JavaScriptTokenizer()
self._has_errors = False
self.__error_handler = error_handler
self.__lint_rules = lint_rules
self.__state_tracker = state_tracker
self.__metadata_pass = metadata_pass
self.__limited_doc_files = limited_doc_files
self.__tokenizer = javascripttokenizer.JavaScriptTokenizer()
self.__has_errors = False
def HandleError(self, code, message, token, position=None,
fix_data=None):
@ -136,8 +122,8 @@ class CheckerBase(object):
position: The position of the error, defaults to None.
fix_data: Metadata used for fixing the error.
"""
self._has_errors = True
self._error_handler.HandleError(
self.__has_errors = True
self.__error_handler.HandleError(
error.Error(code, message, token, position, fix_data))
def HasErrors(self):
@ -146,31 +132,21 @@ class CheckerBase(object):
Returns:
True if the style checker has found any errors.
"""
return self._has_errors
return self.__has_errors
def Check(self, filename, source=None):
def Check(self, filename):
"""Checks the file, printing warnings and errors as they are found.
Args:
filename: The name of the file to check.
source: Optional. The contents of the file. Can be either a string or
file-like object. If omitted, contents will be read from disk from
the given filename.
"""
if source is None:
try:
f = open(filename)
except IOError:
self._error_handler.HandleFile(filename, None)
self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None)
self._error_handler.FinishFile()
return
else:
if type(source) in [str, unicode]:
f = StringIO.StringIO(source)
else:
f = source
try:
f = open(filename)
except IOError:
self.__error_handler.HandleFile(filename, None)
self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None)
self.__error_handler.FinishFile()
return
try:
if filename.endswith('.html') or filename.endswith('.htm'):
@ -194,22 +170,24 @@ class CheckerBase(object):
failed prematurely.
"""
limited_doc_checks = False
if self._limited_doc_files:
for limited_doc_filename in self._limited_doc_files:
if self.__limited_doc_files:
for limited_doc_filename in self.__limited_doc_files:
if filename.endswith(limited_doc_filename):
limited_doc_checks = True
break
lint_rules = self._lint_rules
state_tracker = self.__state_tracker
lint_rules = self.__lint_rules
state_tracker.Reset()
lint_rules.Initialize(self, limited_doc_checks, is_html)
token = self._tokenizer.TokenizeFile(lines_iter)
token = self.__tokenizer.TokenizeFile(lines_iter)
parse_error = None
if self._metadata_pass:
if self.__metadata_pass:
try:
self._metadata_pass.Reset()
self._metadata_pass.Process(token)
self.__metadata_pass.Reset()
self.__metadata_pass.Process(token)
except ecmametadatapass.ParseError, caught_parse_error:
if FLAGS.error_trace:
traceback.print_exc()
@ -219,94 +197,41 @@ class CheckerBase(object):
traceback.print_exc()
return False
self._error_handler.HandleFile(filename, token)
self.__error_handler.HandleFile(filename, token)
return self._CheckTokens(token, parse_error=parse_error,
debug_tokens=FLAGS.debug_tokens)
def _CheckTokens(self, token, parse_error, debug_tokens):
"""Checks a token stream for lint warnings/errors.
Args:
token: The first token in the token stream to check.
parse_error: A ParseError if any errors occurred.
debug_tokens: Whether every token should be printed as it is encountered
during the pass.
Returns:
A boolean indicating whether the full token stream could be checked or if
checking failed prematurely.
"""
result = self._ExecutePass(token, self._LintPass, parse_error, debug_tokens)
if not result:
return False
self._lint_rules.Finalize(self._state_tracker, self._tokenizer.mode)
self._error_handler.FinishFile()
return True
def _LintPass(self, token):
"""Checks an individual token for lint warnings/errors.
Used to encapsulate the logic needed to check an individual token so that it
can be passed to _ExecutePass.
Args:
token: The token to check.
"""
self._lint_rules.CheckToken(token, self._state_tracker)
def _ExecutePass(self, token, pass_function, parse_error=None,
debug_tokens=False):
"""Calls the given function for every token in the given token stream.
As each token is passed to the given function, state is kept up to date and,
depending on the error_trace flag, errors are either caught and reported, or
allowed to bubble up so developers can see the full stack trace. If a parse
error is specified, the pass will proceed as normal until the token causing
the parse error is reached.
Args:
token: The first token in the token stream.
pass_function: The function to call for each token in the token stream.
parse_error: A ParseError if any errors occurred.
debug_tokens: Whether every token should be printed as it is encountered
during the pass.
Returns:
A boolean indicating whether the full token stream could be checked or if
checking failed prematurely.
Raises:
Exception: If any error occurred while calling the given function.
"""
self._state_tracker.Reset()
while token:
if debug_tokens:
if FLAGS.debug_tokens:
print token
if parse_error and parse_error.token == token:
# Report any parse errors from above once we find the token.
message = ('Error parsing file at token "%s". Unable to '
'check the rest of file.' % token.string)
self.HandleError(errors.FILE_DOES_NOT_PARSE, message, token)
self._error_handler.FinishFile()
return
self.__error_handler.FinishFile()
return False
try:
self._state_tracker.HandleToken(
token, self._state_tracker.GetLastNonSpaceToken())
pass_function(token)
self._state_tracker.HandleAfterToken(token)
except:
if FLAGS.error_trace:
raise
else:
if FLAGS.error_trace:
state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
else:
try:
state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
except:
self.HandleError(errors.FILE_DOES_NOT_PARSE,
('Error parsing file at token "%s". Unable to '
'check the rest of file.' % token.string),
'check the rest of file.' % token.string),
token)
self._error_handler.FinishFile()
return False
self.__error_handler.FinishFile()
return False
# Check the token for style guide violations.
lint_rules.CheckToken(token, state_tracker)
state_tracker.HandleAfterToken(token)
# Move to the next token.
token = token.next
lint_rules.Finalize(state_tracker, self.__tokenizer.mode)
self.__error_handler.FinishFile()
return True

View File

@ -1,500 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for computing dependency information for closurized JavaScript files.
Closurized JavaScript files express dependencies using goog.require and
goog.provide statements. In order for the linter to detect when a statement is
missing or unnecessary, all identifiers in the JavaScript file must first be
processed to determine if they constitute the creation or usage of a dependency.
"""
from closure_linter import javascripttokens
from closure_linter import tokenutil
# pylint: disable-msg=C6409
TokenType = javascripttokens.JavaScriptTokenType
DEFAULT_EXTRA_NAMESPACES = [
'goog.testing.asserts',
'goog.testing.jsunit',
]
class ClosurizedNamespacesInfo(object):
"""Dependency information for closurized JavaScript files.
Processes token streams for dependency creation or usage and provides logic
for determining if a given require or provide statement is unnecessary or if
there are missing require or provide statements.
"""
def __init__(self, closurized_namespaces, ignored_extra_namespaces):
  """Set up dependency tracking for the given namespace configuration.

  Args:
    closurized_namespaces: A list of namespace prefixes that should be
        processed for dependency information.  Identifiers outside these
        prefixes are ignored.
    ignored_extra_namespaces: A list of namespaces that are never reported
        as extra, regardless of whether they are actually used.
  """
  self._closurized_namespaces = closurized_namespaces
  # The default test-support namespaces are always tolerated as "extra".
  self._ignored_extra_namespaces = (
      ignored_extra_namespaces + DEFAULT_EXTRA_NAMESPACES)
  self.Reset()
def Reset(self):
  """Clears all per-file state so that a new file can be processed."""
  # goog.provide / goog.require tokens, in the order they appeared.
  self._provide_tokens = []
  self._require_tokens = []

  # Namespaces already goog.provide'd / goog.require'd in this file.
  self._provided_namespaces = []
  self._required_namespaces = []

  # (namespace, identifier) pairs for identifiers created and used in the
  # file.  Both the namespace and the full identifier are tracked because
  # many existing files provide a method or constant directly instead of
  # its namespace; ideally only namespaces would be needed.
  self._created_namespaces = []
  self._used_namespaces = []

  # Requires annotated with @suppress {extraRequire} that would otherwise
  # look unnecessary.
  self._suppressed_requires = []

  # Duplicate goog.provide / goog.require tokens.
  self._duplicate_provide_tokens = []
  self._duplicate_require_tokens = []

  # True once a goog.scope call is seen.  Scopified files are skipped
  # rather than checked, pending real support for scoped namespaces.
  self._scopified_file = False

  # TODO(user): Handle the case where there are 2 different requires
  # that can satisfy the same dependency, but only one is necessary.
def GetProvidedNamespaces(self):
  """Returns a copy of the namespaces goog.provide'd by this file.

  Returns:
    A new list of strings, one per goog.provide statement seen so far in
    the file being checked.
  """
  return self._provided_namespaces[:]
def GetRequiredNamespaces(self):
  """Returns a copy of the namespaces goog.require'd by this file.

  Returns:
    A new list of strings, one per goog.require statement seen so far in
    the file being checked.
  """
  return self._required_namespaces[:]
def IsExtraProvide(self, token):
  """Returns whether the given goog.provide token is unnecessary.

  Args:
    token: A goog.provide token.

  Returns:
    True if the given token corresponds to an unnecessary goog.provide
    statement, otherwise False.
  """
  # goog.scope files are never flagged; scoped namespaces are not checked.
  if self._scopified_file:
    return False

  # The provided namespace is the string argument following this token.
  namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
  base_namespace = namespace.split('.', 1)[0]
  # Only namespaces under the configured closurized prefixes are checked.
  if base_namespace not in self._closurized_namespaces:
    return False

  # A repeated goog.provide of the same namespace is always extra.
  if token in self._duplicate_provide_tokens:
    return True

  # The provide is justified if anything in the file creates either the
  # namespace or the exact identifier.
  # TODO(user): There's probably a faster way to compute this.
  for created_namespace, created_identifier in self._created_namespaces:
    if namespace == created_namespace or namespace == created_identifier:
      return False

  return True
def IsExtraRequire(self, token):
  """Returns whether the given goog.require token is unnecessary.

  Args:
    token: A goog.require token.

  Returns:
    True if the given token corresponds to an unnecessary goog.require
    statement, otherwise False.
  """
  # goog.scope files are never flagged; scoped namespaces are not checked.
  if self._scopified_file:
    return False

  # The required namespace is the string argument following this token.
  namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
  base_namespace = namespace.split('.', 1)[0]
  # Only namespaces under the configured closurized prefixes are checked.
  if base_namespace not in self._closurized_namespaces:
    return False

  # Explicitly whitelisted namespaces are never reported as extra.
  if namespace in self._ignored_extra_namespaces:
    return False

  # A repeated goog.require of the same namespace is always extra.
  if token in self._duplicate_require_tokens:
    return True

  # Requires annotated with @suppress {extraRequire} are never extra.
  if namespace in self._suppressed_requires:
    return False

  # If the namespace contains a component that is initial caps, then that
  # must be the last component of the namespace.
  parts = namespace.split('.')
  if len(parts) > 1 and parts[-2][0].isupper():
    return True

  # The require is justified if anything in the file uses either the
  # namespace or the exact identifier.
  # TODO(user): There's probably a faster way to compute this.
  for used_namespace, used_identifier in self._used_namespaces:
    if namespace == used_namespace or namespace == used_identifier:
      return False

  return True
def GetMissingProvides(self):
  """Returns namespaces this file should goog.provide but does not.

  Returns:
    A set of strings, each a namespace created in the file that is neither
    provided nor required by it.  Private identifiers are excluded.
  """
  # Scopified (goog.scope) files are not checked.
  if self._scopified_file:
    return set()

  provided = self._provided_namespaces
  required = self._required_namespaces
  return set(
      namespace
      for namespace, identifier in self._created_namespaces
      if not self._IsPrivateIdentifier(identifier)
      and namespace not in provided
      and identifier not in provided
      and namespace not in required)
def GetMissingRequires(self):
  """Returns namespaces this file should goog.require but does not.

  A used identifier is satisfied by a goog.require of its namespace or of
  the identifier itself, by a goog.provide of its namespace, or by an
  exact-match created identifier (exact because things can be defined on a
  namespace in more than one file).  Provided namespaces should be a subset
  of created namespaces, but both are checked because namespace creation is
  not always detectable.  The 'goog' namespace is assumed always available.

  Returns:
    A set of strings, each a namespace that should be required but is not.
  """
  # Scopified (goog.scope) files are not checked.
  if self._scopified_file:
    return set()

  # Everything already required, plus the implicit 'goog' namespace.
  external_dependencies = set(self._required_namespaces)
  external_dependencies.add('goog')

  created_identifiers = set(
      identifier for _, identifier in self._created_namespaces)

  missing = set()
  for namespace, identifier in self._used_namespaces:
    if self._IsPrivateIdentifier(identifier):
      continue
    if namespace in external_dependencies:
      continue
    if namespace in self._provided_namespaces:
      continue
    if identifier in external_dependencies:
      continue
    if identifier in created_identifiers:
      continue
    missing.add(namespace)
  return missing
def _IsPrivateIdentifier(self, identifier):
"""Returns whether the given identifer is private."""
pieces = identifier.split('.')
for piece in pieces:
if piece.endswith('_'):
return True
return False
def IsFirstProvide(self, token):
  """Returns whether token is the first goog.provide token in the file."""
  provides = self._provide_tokens
  return provides and provides[0] == token
def IsFirstRequire(self, token):
  """Returns whether token is the first goog.require token in the file."""
  requires = self._require_tokens
  return requires and requires[0] == token
def IsLastProvide(self, token):
  """Returns whether token is the last goog.provide token in the file."""
  provides = self._provide_tokens
  return provides and provides[-1] == token
def IsLastRequire(self, token):
  """Returns whether token is the last goog.require token in the file."""
  requires = self._require_tokens
  return requires and requires[-1] == token
def ProcessToken(self, token, state_tracker):
  """Processes the given token for dependency information.

  Args:
    token: The token to process.
    state_tracker: The JavaScript state tracker.
  """
  # Note that this method is in the critical path for the linter and has been
  # optimized for performance in the following ways:
  # - Tokens are checked by type first to minimize the number of function
  #   calls necessary to determine if action needs to be taken for the token.
  # - The most common tokens types are checked for first.
  # - The number of function calls has been minimized (thus the length of this
  #   function.
  if token.type == TokenType.IDENTIFIER:
    # TODO(user): Consider saving the whole identifier in metadata.
    whole_identifier_string = self._GetWholeIdentifierString(token)
    if whole_identifier_string is None:
      # We only want to process the identifier one time. If the whole string
      # identifier is None, that means this token was part of a multi-token
      # identifier, but it was not the first token of the identifier.
      return

    # In the odd case that a goog.require is encountered inside a function,
    # just ignore it (e.g. dynamic loading in test runners).
    if token.string == 'goog.require' and not state_tracker.InFunction():
      self._require_tokens.append(token)
      # The required namespace is the string argument after the token.
      namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
      if namespace in self._required_namespaces:
        self._duplicate_require_tokens.append(token)
      else:
        self._required_namespaces.append(namespace)

      # If there is a suppression for the require, add a usage for it so it
      # gets treated as a regular goog.require (i.e. still gets sorted).
      jsdoc = state_tracker.GetDocComment()
      if jsdoc and ('extraRequire' in jsdoc.suppressions):
        self._suppressed_requires.append(namespace)
        self._AddUsedNamespace(state_tracker, namespace)

    elif token.string == 'goog.provide':
      self._provide_tokens.append(token)
      # The provided namespace is the string argument after the token.
      namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
      if namespace in self._provided_namespaces:
        self._duplicate_provide_tokens.append(token)
      else:
        self._provided_namespaces.append(namespace)

      # If there is a suppression for the provide, add a creation for it so it
      # gets treated as a regular goog.provide (i.e. still gets sorted).
      jsdoc = state_tracker.GetDocComment()
      if jsdoc and ('extraProvide' in jsdoc.suppressions):
        self._AddCreatedNamespace(state_tracker, namespace)

    elif token.string == 'goog.scope':
      # Mark the file scopified; the Is*/GetMissing* checks bail out early
      # for scopified files.
      self._scopified_file = True

    else:
      jsdoc = state_tracker.GetDocComment()
      if jsdoc and jsdoc.HasFlag('typedef'):
        # A @typedef creates the identifier rather than using it.
        self._AddCreatedNamespace(state_tracker, whole_identifier_string,
                                  self.GetClosurizedNamespace(
                                      whole_identifier_string))
      else:
        self._AddUsedNamespace(state_tracker, whole_identifier_string)

  elif token.type == TokenType.SIMPLE_LVALUE:
    identifier = token.values['identifier']
    namespace = self.GetClosurizedNamespace(identifier)
    # An assignment inside a function is a usage; at top level it creates
    # the identifier (unless it is in the bare 'goog' namespace).
    if state_tracker.InFunction():
      self._AddUsedNamespace(state_tracker, identifier)
    elif namespace and namespace != 'goog':
      self._AddCreatedNamespace(state_tracker, identifier, namespace)

  elif token.type == TokenType.DOC_FLAG:
    flag_type = token.attached_object.flag_type
    is_interface = state_tracker.GetDocComment().HasFlag('interface')
    if flag_type == 'implements' or (flag_type == 'extends' and is_interface):
      # Interfaces should be goog.require'd.
      doc_start = tokenutil.Search(token, TokenType.DOC_START_BRACE)
      interface = tokenutil.Search(doc_start, TokenType.COMMENT)
      self._AddUsedNamespace(state_tracker, interface.string)
def _GetWholeIdentifierString(self, token):
  """Returns the whole identifier string for the given token.

  Checks the tokens after the current one to see if the token is one in a
  sequence of tokens which are actually just one identifier (i.e. a line was
  wrapped in the middle of an identifier).

  Args:
    token: The token to check.

  Returns:
    The whole identifier string or None if this token is not the first token
    in a multi-token identifier.
  """
  result = ''

  # Search backward to determine if this token is the first token of the
  # identifier. If it is not the first token, return None to signal that this
  # token should be ignored.
  prev_token = token.previous
  while prev_token:
    # NOTE: relies on 'and' binding tighter than 'or' — the condition is
    # IDENTIFIER, or (NORMAL and string == '.').
    if (prev_token.IsType(TokenType.IDENTIFIER) or
        prev_token.IsType(TokenType.NORMAL) and prev_token.string == '.'):
      return None
    elif (not prev_token.IsType(TokenType.WHITESPACE) and
          not prev_token.IsAnyType(TokenType.COMMENT_TYPES)):
      # Any other non-whitespace, non-comment token ends the backward scan.
      break
    prev_token = prev_token.previous

  # Search forward to find other parts of this identifier separated by white
  # space.
  next_token = token
  while next_token:
    if (next_token.IsType(TokenType.IDENTIFIER) or
        next_token.IsType(TokenType.NORMAL) and next_token.string == '.'):
      # Accumulate identifier pieces and joining dots.
      result += next_token.string
    elif (not next_token.IsType(TokenType.WHITESPACE) and
          not next_token.IsAnyType(TokenType.COMMENT_TYPES)):
      # Any other non-whitespace, non-comment token ends the identifier.
      break
    next_token = next_token.next

  return result
def _AddCreatedNamespace(self, state_tracker, identifier, namespace=None):
"""Adds the namespace of an identifier to the list of created namespaces.
If the identifier is annotated with a 'missingProvide' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: The identifier to add.
namespace: The namespace of the identifier or None if the identifier is
also the namespace.
"""
if not namespace:
namespace = identifier
jsdoc = state_tracker.GetDocComment()
if jsdoc and 'missingProvide' in jsdoc.suppressions:
return
self._created_namespaces.append([namespace, identifier])
def _AddUsedNamespace(self, state_tracker, identifier):
"""Adds the namespace of an identifier to the list of used namespaces.
If the identifier is annotated with a 'missingRequire' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: An identifier which has been used.
"""
jsdoc = state_tracker.GetDocComment()
if jsdoc and 'missingRequire' in jsdoc.suppressions:
return
namespace = self.GetClosurizedNamespace(identifier)
if namespace:
self._used_namespaces.append([namespace, identifier])
def GetClosurizedNamespace(self, identifier):
"""Given an identifier, returns the namespace that identifier is from.
Args:
identifier: The identifier to extract a namespace from.
Returns:
The namespace the given identifier resides in, or None if one could not
be found.
"""
if identifier.startswith('goog.global'):
# Ignore goog.global, since it is, by definition, global.
return None
parts = identifier.split('.')
for namespace in self._closurized_namespaces:
if not identifier.startswith(namespace + '.'):
continue
last_part = parts[-1]
if not last_part:
# TODO(robbyw): Handle this: it's a multi-line identifier.
return None
# The namespace for a class is the shortest prefix ending in a class
# name, which starts with a capital letter but is not a capitalized word.
#
# We ultimately do not want to allow requiring or providing of inner
# classes/enums. Instead, a file should provide only the top-level class
# and users should require only that.
namespace = []
for part in parts:
if part == 'prototype' or part.isupper():
return '.'.join(namespace)
namespace.append(part)
if part[0].isupper():
return '.'.join(namespace)
# At this point, we know there's no class or enum, so the namespace is
# just the identifier with the last part removed. With the exception of
# apply, inherits, and call, which should also be stripped.
if parts[-1] in ('apply', 'inherits', 'call'):
parts.pop()
parts.pop()
# If the last part ends with an underscore, it is a private variable,
# method, or enum. The namespace is whatever is before it.
if parts and parts[-1].endswith('_'):
parts.pop()
return '.'.join(parts)
return None

View File

@ -1,451 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for ClosurizedNamespacesInfo."""
import unittest as googletest
from closure_linter import closurizednamespacesinfo
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import tokenutil
# pylint: disable-msg=C6409
# Short module-level alias for the token-type enumeration; the pylint
# suppression above covers the non-standard constant-style name.
TokenType = javascripttokens.JavaScriptTokenType
class ClosurizedNamespacesInfoTest(googletest.TestCase):
  """Tests for ClosurizedNamespacesInfo."""

  # Map of identifier -> namespace expected from GetClosurizedNamespace,
  # assuming 'package' is the only closurized root.
  _test_cases = {
      'goog.global.anything': None,
      'package.CONSTANT': 'package',
      'package.methodName': 'package',
      'package.subpackage.methodName': 'package.subpackage',
      'package.subpackage.methodName.apply': 'package.subpackage',
      'package.ClassName.something': 'package.ClassName',
      'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
      'package.ClassName.CONSTANT': 'package.ClassName',
      'package.namespace.CONSTANT.methodName': 'package.namespace',
      'package.ClassName.inherits': 'package.ClassName',
      'package.ClassName.apply': 'package.ClassName',
      'package.ClassName.methodName.apply': 'package.ClassName',
      'package.ClassName.methodName.call': 'package.ClassName',
      'package.ClassName.prototype.methodName': 'package.ClassName',
      'package.ClassName.privateMethod_': 'package.ClassName',
      'package.className.privateProperty_': 'package.className',
      'package.className.privateProperty_.methodName': 'package.className',
      'package.ClassName.PrivateEnum_': 'package.ClassName',
      'package.ClassName.prototype.methodName.apply': 'package.ClassName',
      'package.ClassName.property.subProperty': 'package.ClassName',
      'package.className.prototype.something.somethingElse': 'package.className'
  }

  # Tokenizer shared by all test methods; it is stateless across files.
  _tokenizer = javascripttokenizer.JavaScriptTokenizer()

  def testGetClosurizedNamespace(self):
    """Tests that the correct namespace is returned for various identifiers."""
    namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
        closurized_namespaces=['package'], ignored_extra_namespaces=[])
    for identifier, expected_namespace in self._test_cases.items():
      actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
      self.assertEqual(
          expected_namespace,
          actual_namespace,
          'expected namespace "' + str(expected_namespace) +
          '" for identifier "' + str(identifier) + '" but was "' +
          str(actual_namespace) + '"')

  def testIgnoredExtraNamespaces(self):
    """Tests that ignored_extra_namespaces are ignored."""
    token = self._GetRequireTokens('package.Something')
    namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
        closurized_namespaces=['package'],
        ignored_extra_namespaces=['package.Something'])
    self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Should be valid since it is in ignored namespaces.')
    namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
        ['package'], [])
    self.assertTrue(namespaces_info.IsExtraRequire(token),
                    'Should be invalid since it is not in ignored namespaces.')

  def testIsExtraProvide_created(self):
    """Tests that provides for created namespaces are not extra."""
    input_lines = [
        'goog.provide(\'package.Foo\');',
        'package.Foo = function() {};'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertFalse(namespaces_info.IsExtraProvide(token),
                     'Should not be extra since it is created.')

  def testIsExtraProvide_createdIdentifier(self):
    """Tests that provides for created identifiers are not extra."""
    input_lines = [
        'goog.provide(\'package.Foo.methodName\');',
        'package.Foo.methodName = function() {};'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertFalse(namespaces_info.IsExtraProvide(token),
                     'Should not be extra since it is created.')

  def testIsExtraProvide_notCreated(self):
    """Tests that provides for non-created namespaces are extra."""
    input_lines = ['goog.provide(\'package.Foo\');']
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertTrue(namespaces_info.IsExtraProvide(token),
                    'Should be extra since it is not created.')

  def testIsExtraProvide_duplicate(self):
    """Tests that providing a namespace twice makes the second one extra."""
    input_lines = [
        'goog.provide(\'package.Foo\');',
        'goog.provide(\'package.Foo\');',
        'package.Foo = function() {};'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    # Advance to the second goog.provide token.
    token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
    self.assertTrue(namespaces_info.IsExtraProvide(token),
                    'Should be extra since it is already provided.')

  def testIsExtraProvide_notClosurized(self):
    """Tests that provides of non-closurized namespaces are not extra."""
    input_lines = ['goog.provide(\'notclosurized.Foo\');']
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertFalse(namespaces_info.IsExtraProvide(token),
                     'Should not be extra since it is not closurized.')

  def testIsExtraRequire_used(self):
    """Tests that requires for used namespaces are not extra."""
    input_lines = [
        'goog.require(\'package.Foo\');',
        'var x = package.Foo.methodName();'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Should not be extra since it is used.')

  def testIsExtraRequire_usedIdentifier(self):
    """Tests that requires for used methods on classes are extra."""
    input_lines = [
        'goog.require(\'package.Foo.methodName\');',
        'var x = package.Foo.methodName();'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertTrue(namespaces_info.IsExtraRequire(token),
                    'Should require the package, not the method specifically.')

  def testIsExtraRequire_notUsed(self):
    """Tests that requires for unused namespaces are extra."""
    input_lines = ['goog.require(\'package.Foo\');']
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertTrue(namespaces_info.IsExtraRequire(token),
                    'Should be extra since it is not used.')

  def testIsExtraRequire_notClosurized(self):
    """Tests that requires of non-closurized namespaces are not extra."""
    input_lines = ['goog.require(\'notclosurized.Foo\');']
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Should not be extra since it is not closurized.')

  def testIsExtraRequire_objectOnClass(self):
    """Tests that requiring an object on a class is extra."""
    input_lines = [
        'goog.require(\'package.Foo.Enum\');',
        'var x = package.Foo.Enum.VALUE1;',
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertTrue(namespaces_info.IsExtraRequire(token),
                    'The whole class, not the object, should be required.');

  def testIsExtraRequire_constantOnClass(self):
    """Tests that requiring a constant on a class is extra."""
    input_lines = [
        'goog.require(\'package.Foo.CONSTANT\');',
        'var x = package.Foo.CONSTANT',
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertTrue(namespaces_info.IsExtraRequire(token),
                    'The class, not the constant, should be required.');

  def testIsExtraRequire_constantNotOnClass(self):
    """Tests that requiring a constant not on a class is OK."""
    input_lines = [
        'goog.require(\'package.subpackage.CONSTANT\');',
        'var x = package.subpackage.CONSTANT',
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Constants can be required except on classes.');

  def testIsExtraRequire_methodNotOnClass(self):
    """Tests that requiring a method not on a class is OK."""
    input_lines = [
        'goog.require(\'package.subpackage.method\');',
        'var x = package.subpackage.method()',
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Methods can be required except on classes.');

  def testIsExtraRequire_defaults(self):
    """Tests that there are no warnings about extra requires for test utils"""
    input_lines = ['goog.require(\'goog.testing.jsunit\');']
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['goog'], [])
    self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Should not be extra since it is for testing.')

  def testGetMissingProvides_provided(self):
    """Tests that provided functions don't cause a missing provide."""
    input_lines = [
        'goog.provide(\'package.Foo\');',
        'package.Foo = function() {};'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(0, len(namespaces_info.GetMissingProvides()))

  def testGetMissingProvides_providedIdentifier(self):
    """Tests that provided identifiers don't cause a missing provide."""
    input_lines = [
        'goog.provide(\'package.Foo.methodName\');',
        'package.Foo.methodName = function() {};'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(0, len(namespaces_info.GetMissingProvides()))

  def testGetMissingProvides_providedParentIdentifier(self):
    """Tests that provided identifiers on a class don't cause a missing provide
    on objects attached to that class."""
    input_lines = [
        'goog.provide(\'package.foo.ClassName\');',
        'package.foo.ClassName.methodName = function() {};',
        'package.foo.ClassName.ObjectName = 1;',
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(0, len(namespaces_info.GetMissingProvides()))

  def testGetMissingProvides_unprovided(self):
    """Tests that unprovided functions cause a missing provide."""
    input_lines = ['package.Foo = function() {};']
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(1, len(namespaces_info.GetMissingProvides()))
    self.assertTrue('package.Foo' in namespaces_info.GetMissingProvides())

  def testGetMissingProvides_privatefunction(self):
    """Tests that unprovided private functions don't cause a missing provide."""
    input_lines = ['package.Foo_ = function() {};']
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(0, len(namespaces_info.GetMissingProvides()))

  def testGetMissingProvides_required(self):
    """Tests that required namespaces don't cause a missing provide."""
    input_lines = [
        'goog.require(\'package.Foo\');',
        'package.Foo.methodName = function() {};'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(0, len(namespaces_info.GetMissingProvides()))

  def testGetMissingRequires_required(self):
    """Tests that required namespaces don't cause a missing require."""
    input_lines = [
        'goog.require(\'package.Foo\');',
        'package.Foo();'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    # NOTE(review): asserts on GetMissingProvides despite the test name
    # referring to requires -- confirm whether GetMissingRequires was meant.
    self.assertEquals(0, len(namespaces_info.GetMissingProvides()))

  def testGetMissingRequires_requiredIdentifier(self):
    """Tests that required namespaces satisfy identifiers on that namespace."""
    input_lines = [
        'goog.require(\'package.Foo\');',
        'package.Foo.methodName();'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    # NOTE(review): asserts on GetMissingProvides despite the test name
    # referring to requires -- confirm whether GetMissingRequires was meant.
    self.assertEquals(0, len(namespaces_info.GetMissingProvides()))

  def testGetMissingRequires_requiredParentClass(self):
    """Tests that requiring a parent class of an object is sufficient to prevent
    a missing require on that object."""
    input_lines = [
        'goog.require(\'package.Foo\');',
        'package.Foo.methodName();',
        'package.Foo.methodName(package.Foo.ObjectName);'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(0, len(namespaces_info.GetMissingRequires()))

  def testGetMissingRequires_unrequired(self):
    """Tests that unrequired namespaces cause a missing require."""
    input_lines = ['package.Foo();']
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
    self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())

  def testGetMissingRequires_provided(self):
    """Tests that provided namespaces satisfy identifiers on that namespace."""
    input_lines = [
        'goog.provide(\'package.Foo\');',
        'package.Foo.methodName();'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(0, len(namespaces_info.GetMissingRequires()))

  def testGetMissingRequires_created(self):
    """Tests that created namespaces do not satisfy usage of an identifier."""
    input_lines = [
        'package.Foo = function();',
        'package.Foo.methodName();'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
    self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())

  def testGetMissingRequires_createdIdentifier(self):
    """Tests that created identifiers satisfy usage of the identifier."""
    input_lines = [
        'package.Foo.methodName = function();',
        'package.Foo.methodName();'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(0, len(namespaces_info.GetMissingRequires()))

  def testGetMissingRequires_objectOnClass(self):
    """Tests that we should require a class, not the object on the class."""
    input_lines = [
        'goog.require(\'package.Foo.Enum\');',
        'var x = package.Foo.Enum.VALUE1;',
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertEquals(1, len(namespaces_info.GetMissingRequires()),
                      'The whole class, not the object, should be required.');

  def testIsFirstProvide(self):
    """Tests operation of the isFirstProvide method."""
    input_lines = [
        'goog.provide(\'package.Foo\');',
        'package.Foo.methodName();'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
    self.assertTrue(namespaces_info.IsFirstProvide(token))

  def testGetWholeIdentifierString(self):
    """Tests that created identifiers satisfy usage of the identifier."""
    input_lines = [
        'package.Foo.',
        '    veryLong.',
        '        identifier;'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo([], [])
    self.assertEquals('package.Foo.veryLong.identifier',
                      namespaces_info._GetWholeIdentifierString(token))
    self.assertEquals(None,
                      namespaces_info._GetWholeIdentifierString(token.next))

  def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
                                    ignored_extra_namespaces):
    """Returns a namespaces info initialized with the given token stream."""
    namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
        closurized_namespaces=closurized_namespaces,
        ignored_extra_namespaces=ignored_extra_namespaces)
    state_tracker = javascriptstatetracker.JavaScriptStateTracker()
    while token:
      namespaces_info.ProcessToken(token, state_tracker)
      token = token.next
    return namespaces_info

  def _GetProvideTokens(self, namespace):
    """Returns a list of tokens for a goog.require of the given namespace."""
    # NOTE(review): despite the name, this builds a goog.require line,
    # identical to _GetRequireTokens below -- confirm whether goog.provide
    # was intended.
    line_text = 'goog.require(\'' + namespace + '\');\n'
    return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])

  def _GetRequireTokens(self, namespace):
    """Returns a list of tokens for a goog.require of the given namespace."""
    line_text = 'goog.require(\'' + namespace + '\');\n'
    return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
# Allow running this test module directly.
if __name__ == '__main__':
  googletest.main()

View File

@ -1,16 +1 @@
#!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package indicator for gjslint.common."""

View File

@ -35,7 +35,7 @@ class ErrorAccumulator(errorhandler.ErrorHandler):
Args:
error: The error object
"""
self._errors.append(error)
self._errors.append((error.token.line_number, error.code))
def GetErrors(self):
"""Returns the accumulated errors.

View File

@ -1,52 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to format errors."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'nnaze@google.com (Nathan Naze)')
def GetUnixErrorOutput(filename, error, new_error=False):
  """Formats an error as a single UNIX-style output line.

  Args:
    filename: Name of the file the error was found in.
    error: The error object (exposes token, code and message attributes).
    new_error: Whether to mark the error code as newly introduced.

  Returns:
    A string of the form "filename:line:(code) message".
  """
  # An error without a token has no line number to report.
  line_number = '%d' % error.token.line_number if error.token else ''
  code = '%04d' % error.code
  if new_error:
    code = 'New Error ' + code
  return '%s:%s:(%s) %s' % (filename, line_number, code, error.message)
def GetErrorOutput(error, new_error=False):
  """Get a output line for an error in regular format.

  Args:
    error: The error object (exposes token, code and message attributes).
    new_error: Whether to prefix the message with 'New Error '.

  Returns:
    A string of the form "Line <number>, E:<code>: message".
  """
  line = ''
  if error.token:
    line = 'Line %d, ' % error.token.line_number

  code = 'E:%04d' % error.code

  error_message = error.message
  if new_error:
    error_message = 'New Error ' + error_message

  # Bug fix: previously returned error.message here, silently dropping the
  # 'New Error ' prefix computed above whenever new_error was True.
  return '%s%s: %s' % (line, code, error_message)

View File

@ -0,0 +1,203 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error handler class that prints errors to stdout."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import error
from closure_linter.common import errorhandler
# Convenience alias for the shared Error class.
Error = error.Error

# The error message is of the format:
# Line <number>, E:<code>: message
DEFAULT_FORMAT = 1

# The error message is of the format:
# filename:[line number]:message
UNIX_FORMAT = 2
class ErrorPrinter(errorhandler.ErrorHandler):
  """ErrorHandler that prints errors to stdout."""

  def __init__(self, new_errors=None):
    """Initializes this error printer.

    Args:
      new_errors: A sequence of error codes representing recently introduced
        errors, defaults to None.
    """
    # Number of errors
    self._error_count = 0
    # Number of new errors
    self._new_error_count = 0
    # Number of files checked
    self._total_file_count = 0
    # Number of files with errors
    self._error_file_count = 0
    # Dict of file name to number of errors
    self._file_table = {}
    # List of errors for each file
    self._file_errors = None
    # Current file
    self._filename = None
    self._format = DEFAULT_FORMAT
    if new_errors:
      self._new_errors = frozenset(new_errors)
    else:
      self._new_errors = frozenset(set())

  def SetFormat(self, format):
    """Sets the print format of errors.

    Args:
      format: One of {DEFAULT_FORMAT, UNIX_FORMAT}.
    """
    self._format = format

  def HandleFile(self, filename, first_token):
    """Notifies this ErrorPrinter that subsequent errors are in filename.

    Sets the current file name, and sets a flag stating the header for this
    file has not been printed yet.

    Should be called by a linter before a file is style checked.

    Args:
      filename: The name of the file about to be checked.
      first_token: The first token in the file, or None if there was an error
          opening the file
    """
    # Print a separating blank line when the previous file had errors.
    if self._filename and self._file_table[self._filename]:
      print

    self._filename = filename
    self._file_table[filename] = 0
    self._total_file_count += 1
    self._file_errors = []

  def HandleError(self, error):
    """Prints a formatted error message about the specified error.

    The error message is of the format:
    Error #<code>, line #<number>: message

    Args:
      error: The error object
    """
    self._file_errors.append(error)
    self._file_table[self._filename] += 1
    self._error_count += 1
    if self._new_errors and error.code in self._new_errors:
      self._new_error_count += 1

  def _PrintError(self, error):
    """Prints a formatted error message about the specified error.

    Args:
      error: The error object
    """
    new_error = self._new_errors and error.code in self._new_errors
    if self._format == DEFAULT_FORMAT:
      line = ''
      if error.token:
        line = 'Line %d, ' % error.token.line_number

      code = 'E:%04d' % error.code
      if new_error:
        print '%s%s: (New error) %s' % (line, code, error.message)
      else:
        print '%s%s: %s' % (line, code, error.message)
    else:
      # UNIX format
      filename = self._filename
      line = ''
      if error.token:
        line = '%d' % error.token.line_number

      error_code = '%04d' % error.code
      if new_error:
        error_code = 'New Error ' + error_code
      print '%s:%s:(%s) %s' % (filename, line, error_code, error.message)

  def FinishFile(self):
    """Finishes handling the current file."""
    if self._file_errors:
      self._error_file_count += 1

      # The per-file banner is omitted in UNIX format, where each line
      # already carries the file name.
      if self._format != UNIX_FORMAT:
        print '----- FILE : %s -----' % (self._filename)

      self._file_errors.sort(Error.Compare)

      for error in self._file_errors:
        self._PrintError(error)

  def HasErrors(self):
    """Whether this error printer encountered any errors.

    Returns:
      True if the error printer encountered any errors.
    """
    return self._error_count

  def HasNewErrors(self):
    """Whether this error printer encountered any new errors.

    Returns:
      True if the error printer encountered any new errors.
    """
    return self._new_error_count

  def HasOldErrors(self):
    """Whether this error printer encountered any old errors.

    Returns:
      True if the error printer encountered any old errors.
    """
    return self._error_count - self._new_error_count

  def PrintSummary(self):
    """Print a summary of the number of errors and files."""
    if self.HasErrors() or self.HasNewErrors():
      print ('Found %d errors, including %d new errors, in %d files '
             '(%d files OK).' % (
                 self._error_count,
                 self._new_error_count,
                 self._error_file_count,
                 self._total_file_count - self._error_file_count))
    else:
      print '%d files checked, no errors found.' % self._total_file_count

  def PrintFileSummary(self):
    """Print a detailed summary of the number of errors in each file."""
    keys = self._file_table.keys()
    keys.sort()
    for filename in keys:
      print '%s: %d' % (filename, self._file_table[filename])

View File

@ -101,8 +101,5 @@ class AnnotatedFileTestCase(googletest.TestCase):
self._runner.Run([filename], errors)
errors = errors.GetErrors()
# Convert to expected tuple format.
error_msgs = [(error.token.line_number, error.code) for error in errors]
error_msgs.sort()
return error_msgs
errors.sort()
return errors

View File

@ -123,17 +123,3 @@ class Token(object):
return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string,
self.values, self.line_number,
self.metadata)
def __iter__(self):
"""Returns a token iterator."""
node = self
while node:
yield node
node = node.next
def __reversed__(self):
"""Returns a reverse-direction token iterator."""
node = self
while node:
yield node
node = node.previous

View File

@ -1,101 +0,0 @@
#!/usr/bin/env python
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
import unittest as googletest
from closure_linter.common import tokens
def _CreateDummyToken():
  """Builds a single placeholder token for the linking tests below."""
  dummy = tokens.Token('foo', None, 1, 1)
  return dummy
def _CreateDummyTokens(count):
  """Builds a list of `count` unlinked placeholder tokens."""
  return [_CreateDummyToken() for _ in xrange(count)]
def _SetTokensAsNeighbors(neighbor_tokens):
  """Doubly-links consecutive tokens via their previous/next attributes.

  The first token's previous and the last token's next are left untouched.
  """
  total = len(neighbor_tokens)
  for index, current in enumerate(neighbor_tokens):
    if index > 0:
      current.previous = neighbor_tokens[index - 1]
    if index + 1 < total:
      current.next = neighbor_tokens[index + 1]
class TokensTest(googletest.TestCase):
  """Tests for the doubly-linked Token class in closure_linter.common.tokens."""

  def testIsFirstInLine(self):
    # First token in file (has no previous).
    self.assertTrue(_CreateDummyToken().IsFirstInLine())

    a, b = _CreateDummyTokens(2)
    _SetTokensAsNeighbors([a, b])

    # Tokens on same line
    a.line_number = 30
    b.line_number = 30
    self.assertFalse(b.IsFirstInLine())

    # Tokens on different lines
    b.line_number = 31
    self.assertTrue(b.IsFirstInLine())

  def testIsLastInLine(self):
    # Last token in file (has no next).
    self.assertTrue(_CreateDummyToken().IsLastInLine())

    a, b = _CreateDummyTokens(2)
    _SetTokensAsNeighbors([a, b])

    # Tokens on same line
    a.line_number = 30
    b.line_number = 30
    self.assertFalse(a.IsLastInLine())

    b.line_number = 31
    self.assertTrue(a.IsLastInLine())

  def testIsType(self):
    a = tokens.Token('foo', 'fakeType1', 1, 1)
    self.assertTrue(a.IsType('fakeType1'))
    self.assertFalse(a.IsType('fakeType2'))

  def testIsAnyType(self):
    a = tokens.Token('foo', 'fakeType1', 1, 1)
    self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2']))
    self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4']))

  def testRepr(self):
    a = tokens.Token('foo', 'fakeType1', 1, 1)
    self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a))

  def testIter(self):
    dummy_tokens = _CreateDummyTokens(5)
    _SetTokensAsNeighbors(dummy_tokens)
    a, b, c, d, e = dummy_tokens

    i = iter(a)
    self.assertListEqual([a, b, c, d, e], list(i))

  def testReverseIter(self):
    dummy_tokens = _CreateDummyTokens(5)
    _SetTokensAsNeighbors(dummy_tokens)
    a, b, c, d, e = dummy_tokens

    ri = reversed(e)
    self.assertListEqual([e, d, c, b, a], list(ri))
# Allow running this test module directly.
if __name__ == '__main__':
  googletest.main()

View File

@ -25,7 +25,6 @@ import re
from closure_linter import checkerbase
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errors
from closure_linter import indentation
from closure_linter import javascripttokens
@ -40,6 +39,8 @@ from closure_linter.common import tokens
import gflags as flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('strict', False,
'Whether to validate against the stricter Closure style.')
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
# TODO(robbyw): Check for extra parens on return statements
@ -52,7 +53,6 @@ Context = ecmametadatapass.EcmaContext
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class EcmaScriptLintRules(checkerbase.LintRulesBase):
@ -183,9 +183,8 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
'JsDoc types cannot contain both "?" and "|": "%s"' % p, token)
if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
flag.type_start_token.type != Type.DOC_START_BRACE or
flag.type_end_token.type != Type.DOC_END_BRACE):
if FLAGS.strict and (flag.type_start_token.type != Type.DOC_START_BRACE or
flag.type_end_token.type != Type.DOC_END_BRACE):
self._HandleError(errors.MISSING_BRACES_AROUND_TYPE,
'Type must always be surrounded by curly braces.', token)
@ -250,7 +249,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
type = token.type
# Process the line change.
if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
if not self._is_html and FLAGS.strict:
# TODO(robbyw): Support checking indentation in HTML files.
indentation_errors = self._indentation.CheckToken(token, state)
for indentation_error in indentation_errors:
@ -361,7 +360,32 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
token.previous, Position.All(token.previous.string))
elif type == Type.START_BRACKET:
self._HandleStartBracket(token, last_non_space_token)
if (not first_in_line and token.previous.type == Type.WHITESPACE and
last_non_space_token and
last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
token.previous, Position.All(token.previous.string))
# If the [ token is the first token in a line we shouldn't complain
# about a missing space before [. This is because some Ecma script
# languages allow syntax like:
# [Annotation]
# class MyClass {...}
# So we don't want to blindly warn about missing spaces before [.
# In the the future, when rules for computing exactly how many spaces
# lines should be indented are added, then we can return errors for
# [ tokens that are improperly indented.
# For example:
# var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
# [a,b,c];
# should trigger a proper indentation warning message as [ is not indented
# by four spaces.
elif (not first_in_line and token.previous and
not token.previous.type in (
[Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
Type.EXPRESSION_ENDER_TYPES)):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
token, Position.AtBeginning())
elif type in (Type.END_PAREN, Type.END_BRACKET):
# Ensure there is no space before closing parentheses, except when
# it's in a for statement with an omitted section, or when it's at the
@ -384,14 +408,9 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
elif type == Type.WHITESPACE:
if self.ILLEGAL_TAB.search(token.string):
if token.IsFirstInLine():
if token.next:
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace before "%s"' % token.next.string,
token, Position.All(token.string))
else:
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace',
token, Position.All(token.string))
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace before "%s"' % token.next.string,
token, Position.All(token.string))
else:
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace after "%s"' % token.previous.string,
@ -452,15 +471,12 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,
'Invalid suppress syntax: should be @suppress {errortype}. '
'Spaces matter.', token)
else:
for suppress_type in flag.type.split('|'):
if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
self._HandleError(errors.INVALID_SUPPRESS_TYPE,
'Invalid suppression type: %s' % suppress_type,
token)
elif flag.type not in state.GetDocFlag().SUPPRESS_TYPES:
self._HandleError(errors.INVALID_SUPPRESS_TYPE,
'Invalid suppression type: %s' % flag.type,
token)
elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
flag.flag_type == 'author'):
elif FLAGS.strict and flag.flag_type == 'author':
# TODO(user): In non strict mode check the author tag for as much as
# it exists, though the full form checked below isn't required.
string = token.next.string
@ -530,7 +546,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
while desc_str.endswith('>'):
start_tag_index = desc_str.rfind('<')
if start_tag_index < 0:
break
break
desc_str = desc_str[:start_tag_index].rstrip()
end_position = Position(len(desc_str), 0)
@ -554,8 +570,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._HandleError(errors.INVALID_JSDOC_TAG,
'Invalid JsDoc tag: %s' % token.values['name'], token)
if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
token.values['name'] == 'inheritDoc' and
if (FLAGS.strict and token.values['name'] == 'inheritDoc' and
type == Type.DOC_INLINE_FLAG):
self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
'Unnecessary braces around @inheritDoc',
@ -584,24 +599,18 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
identifier.startswith('this.')):
# We are at the top level and the function/member is documented.
if identifier.endswith('_') and not identifier.endswith('__'):
# Can have a private class which inherits documentation from a
# public superclass.
#
# @inheritDoc is deprecated in favor of using @override, and they
if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
and not ('accessControls' in jsdoc.suppressions)):
if jsdoc.HasFlag('override'):
self._HandleError(errors.INVALID_OVERRIDE_PRIVATE,
'%s should not override a private member.' % identifier,
jsdoc.GetFlag('override').flag_token)
if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
and not ('accessControls' in jsdoc.suppressions)):
# Can have a private class which inherits documentation from a
# public superclass.
if jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor'):
self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE,
'%s should not inherit from a private member.' % identifier,
jsdoc.GetFlag('inheritDoc').flag_token)
if (not jsdoc.HasFlag('private') and
not ('underscore' in jsdoc.suppressions) and not
((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
('accessControls' in jsdoc.suppressions))):
not ('underscore' in jsdoc.suppressions)):
self._HandleError(errors.MISSING_PRIVATE,
'Member "%s" must have @private JsDoc.' %
identifier, token)
@ -609,23 +618,19 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._HandleError(errors.UNNECESSARY_SUPPRESS,
'@suppress {underscore} is not necessary with @private',
jsdoc.suppressions['underscore'])
elif (jsdoc.HasFlag('private') and
not self.InExplicitlyTypedLanguage()):
# It is convention to hide public fields in some ECMA
# implementations from documentation using the @private tag.
elif jsdoc.HasFlag('private'):
self._HandleError(errors.EXTRA_PRIVATE,
'Member "%s" must not have @private JsDoc' %
identifier, token)
# These flags are only legal on localizable message definitions;
# such variables always begin with the prefix MSG_.
for f in ('desc', 'hidden', 'meaning'):
if (jsdoc.HasFlag(f)
if ((jsdoc.HasFlag('desc') or jsdoc.HasFlag('hidden'))
and not identifier.startswith('MSG_')
and identifier.find('.MSG_') == -1):
self._HandleError(errors.INVALID_USE_OF_DESC_TAG,
'Member "%s" should not have @%s JsDoc' % (identifier, f),
token)
# TODO(user): Update error message to show the actual invalid
# tag, either @desc or @hidden.
self._HandleError(errors.INVALID_USE_OF_DESC_TAG,
'Member "%s" should not have @desc JsDoc' % identifier,
token)
# Check for illegaly assigning live objects as prototype property values.
index = identifier.find('.prototype.')
@ -672,8 +677,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
# Languages that don't allow variables to by typed such as
# JavaScript care but languages such as ActionScript or Java
# that allow variables to be typed don't care.
if not self._limited_doc_checks:
self.HandleMissingParameterDoc(token, params_iter.next())
self.HandleMissingParameterDoc(token, params_iter.next())
elif op == 'D':
# Deletion
@ -682,10 +686,9 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
docs_iter.next(), token)
elif op == 'S':
# Substitution
if not self._limited_doc_checks:
self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,
'Parameter mismatch: got "%s", expected "%s"' %
(params_iter.next(), docs_iter.next()), token)
self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,
'Parameter mismatch: got "%s", expected "%s"' %
(params_iter.next(), docs_iter.next()), token)
else:
# Equality - just advance the iterators
@ -719,39 +722,6 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._HandleError(errors.MISSING_SEMICOLON,
'Missing semicolon at end of line', token)
def _HandleStartBracket(self, token, last_non_space_token):
"""Handles a token that is an open bracket.
Args:
token: The token to handle.
last_non_space_token: The last token that was not a space.
"""
if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
last_non_space_token and
last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
token.previous, Position.All(token.previous.string))
# If the [ token is the first token in a line we shouldn't complain
# about a missing space before [. This is because some Ecma script
# languages allow syntax like:
# [Annotation]
# class MyClass {...}
# So we don't want to blindly warn about missing spaces before [.
# In the the future, when rules for computing exactly how many spaces
# lines should be indented are added, then we can return errors for
# [ tokens that are improperly indented.
# For example:
# var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
# [a,b,c];
# should trigger a proper indentation warning message as [ is not indented
# by four spaces.
elif (not token.IsFirstInLine() and token.previous and
not token.previous.type in (
[Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
Type.EXPRESSION_ENDER_TYPES)):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
token, Position.AtBeginning())
def Finalize(self, state, tokenizer_mode):
last_non_space_token = state.GetLastNonSpaceToken()
# Check last line for ending with newline.
@ -780,7 +750,3 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit."""
return []
def InExplicitlyTypedLanguage(self):
"""Returns whether this ecma implementation is explicitly typed."""
return False

View File

@ -1,90 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specific JSLint errors checker."""
import gflags as flags
FLAGS = flags.FLAGS
class Rule(object):
"""Different rules to check."""
# Documentations for specific rules goes in flag definition.
BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level'
INDENTATION = 'indentation'
WELL_FORMED_AUTHOR = 'well_formed_author'
NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc'
BRACES_AROUND_TYPE = 'braces_around_type'
OPTIONAL_TYPE_MARKER = 'optional_type_marker'
UNUSED_PRIVATE_MEMBERS = 'unused_private_members'
# Rule to raise all known errors.
ALL = 'all'
# All rules that are to be checked when using the strict flag. E.g. the rules
# that are specific to the stricter Closure style.
CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL,
INDENTATION,
WELL_FORMED_AUTHOR,
NO_BRACES_AROUND_INHERIT_DOC,
BRACES_AROUND_TYPE,
OPTIONAL_TYPE_MARKER])
flags.DEFINE_boolean('strict', False,
'Whether to validate against the stricter Closure style. '
'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.')
flags.DEFINE_multistring('jslint_error', [],
'List of specific lint errors to check. Here is a list'
' of accepted values:\n'
' - ' + Rule.ALL + ': enables all following errors.\n'
' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates'
'number of blank lines between blocks at top level.\n'
' - ' + Rule.INDENTATION + ': checks correct '
'indentation of code.\n'
' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the '
'@author JsDoc tags.\n'
' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': '
'forbids braces around @inheritdoc JsDoc tags.\n'
' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces '
'around types in JsDoc tags.\n'
' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct '
'use of optional marker = in param types.\n'
' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for '
'unused private variables.\n')
def ShouldCheck(rule):
"""Returns whether the optional rule should be checked.
Computes different flags (strict, jslint_error, jslint_noerror) to find out if
this specific rule should be checked.
Args:
rule: Name of the rule (see Rule).
Returns:
True if the rule should be checked according to the flags, otherwise False.
"""
if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error:
return True
# Checks strict rules.
return FLAGS.strict and rule in Rule.CLOSURE_RULES

View File

@ -24,7 +24,6 @@ import gflags as flags
from closure_linter import errors
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import errorhandler
@ -34,33 +33,20 @@ Type = javascripttokens.JavaScriptTokenType
END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')
# Regex to represent common mistake inverting author name and email as
# @author User Name (user@company)
INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
'(?P<name>[^(]+)'
'(?P<whitespace_after_name>\s+)'
'\('
'(?P<email>[^\s]+@[^)\s]+)'
'\)'
'(?P<trailing_characters>.*)')
FLAGS = flags.FLAGS
flags.DEFINE_boolean('disable_indentation_fixing', False,
'Whether to disable automatic fixing of indentation.')
class ErrorFixer(errorhandler.ErrorHandler):
"""Object that fixes simple style errors."""
def __init__(self, external_file=None):
def __init__(self, external_file = None):
"""Initialize the error fixer.
Args:
external_file: If included, all output will be directed to this file
instead of overwriting the files the errors are found in.
"""
errorhandler.ErrorHandler.__init__(self)
self._file_name = None
self._file_token = None
self._external_file = external_file
@ -118,19 +104,6 @@ class ErrorFixer(errorhandler.ErrorHandler):
token.attached_object = javascriptstatetracker.JsDocFlag(token)
self._AddFix(token)
elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
iterator = token.attached_object.type_end_token
if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
iterator = iterator.previous
ending_space = len(iterator.string) - len(iterator.string.rstrip())
iterator.string = '%s=%s' % (iterator.string.rstrip(),
' ' * ending_space)
# Create a new flag object with updated type info.
token.attached_object = javascriptstatetracker.JsDocFlag(token)
self._AddFix(token)
elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
errors.MISSING_SEMICOLON):
semicolon_token = Token(';', Type.SEMICOLON, token.line,
@ -176,9 +149,9 @@ class ErrorFixer(errorhandler.ErrorHandler):
elif code == errors.MISSING_LINE:
if error.position.IsAtBeginning():
tokenutil.InsertBlankLineAfter(token.previous)
tokenutil.InsertLineAfter(token.previous)
else:
tokenutil.InsertBlankLineAfter(token)
tokenutil.InsertLineAfter(token)
self._AddFix(token)
elif code == errors.EXTRA_LINE:
@ -194,7 +167,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
should_delete = False
if num_lines < 0:
num_lines *= -1
num_lines = num_lines * -1
should_delete = True
for i in xrange(1, num_lines + 1):
@ -202,17 +175,16 @@ class ErrorFixer(errorhandler.ErrorHandler):
# TODO(user): DeleteToken should update line numbers.
tokenutil.DeleteToken(token.previous)
else:
tokenutil.InsertBlankLineAfter(token.previous)
tokenutil.InsertLineAfter(token.previous)
self._AddFix(token)
elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
if end_quote:
single_quote_start = Token(
"'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
single_quote_end = Token(
"'", Type.SINGLE_QUOTE_STRING_START, end_quote.line,
token.line_number)
single_quote_start = Token("'", Type.SINGLE_QUOTE_STRING_START,
token.line, token.line_number)
single_quote_end = Token("'", Type.SINGLE_QUOTE_STRING_START,
end_quote.line, token.line_number)
tokenutil.InsertTokenAfter(single_quote_start, token)
tokenutil.InsertTokenAfter(single_quote_end, end_quote)
@ -225,15 +197,15 @@ class ErrorFixer(errorhandler.ErrorHandler):
start_token = token.attached_object.type_start_token
if start_token.type != Type.DOC_START_BRACE:
leading_space = (
len(start_token.string) - len(start_token.string.lstrip()))
leading_space = (len(start_token.string) -
len(start_token.string.lstrip()))
if leading_space:
start_token = tokenutil.SplitToken(start_token, leading_space)
# Fix case where start and end token were the same.
if token.attached_object.type_end_token == start_token.previous:
token.attached_object.type_end_token = start_token
new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
new_token = Token("{", Type.DOC_START_BRACE, start_token.line,
start_token.line_number)
tokenutil.InsertTokenAfter(new_token, start_token.previous)
token.attached_object.type_start_token = new_token
@ -245,7 +217,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
# FLAG_ENDING_TYPE token, if there wasn't a starting brace then
# the end token is the last token of the actual type.
last_type = end_token
if not fixed_tokens:
if not len(fixed_tokens):
last_type = end_token.previous
while last_type.string.isspace():
@ -261,7 +233,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
tokenutil.SplitToken(last_type,
len(last_type.string) - trailing_space)
new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
new_token = Token("}", Type.DOC_END_BRACE, last_type.line,
last_type.line_number)
tokenutil.InsertTokenAfter(new_token, last_type)
token.attached_object.type_end_token = new_token
@ -269,19 +241,21 @@ class ErrorFixer(errorhandler.ErrorHandler):
self._AddFix(fixed_tokens)
elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
require_start_token = error.fix_data
sorter = requireprovidesorter.RequireProvideSorter()
sorter.FixRequires(require_start_token)
elif code in (errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
errors.GOOG_PROVIDES_NOT_ALPHABETIZED):
tokens = error.fix_data
strings = map(lambda x: x.string, tokens)
sorted_strings = sorted(strings)
self._AddFix(require_start_token)
index = 0
changed_tokens = []
for token in tokens:
if token.string != sorted_strings[index]:
token.string = sorted_strings[index]
changed_tokens.append(token)
index += 1
elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
provide_start_token = error.fix_data
sorter = requireprovidesorter.RequireProvideSorter()
sorter.FixProvides(provide_start_token)
self._AddFix(provide_start_token)
self._AddFix(changed_tokens)
elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
if token.previous.string == '{' and token.next.string == '}':
@ -289,23 +263,13 @@ class ErrorFixer(errorhandler.ErrorHandler):
tokenutil.DeleteToken(token.next)
self._AddFix([token])
elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
match = INVERTED_AUTHOR_SPEC.match(token.string)
if match:
token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
match.group('email'),
match.group('whitespace_after_name'),
match.group('name'),
match.group('trailing_characters'))
self._AddFix(token)
elif (code == errors.WRONG_INDENTATION and
not FLAGS.disable_indentation_fixing):
not FLAGS.disable_indentation_fixing):
token = tokenutil.GetFirstTokenInSameLine(token)
actual = error.position.start
expected = error.position.length
if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
if token.type in (Type.WHITESPACE, Type.PARAMETERS):
token.string = token.string.lstrip() + (' ' * expected)
self._AddFix([token])
else:
@ -318,102 +282,26 @@ class ErrorFixer(errorhandler.ErrorHandler):
tokenutil.InsertTokenAfter(new_token, token.previous)
self._AddFix([token])
elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
errors.MISSING_END_OF_SCOPE_COMMENT]:
# Only fix cases where }); is found with no trailing content on the line
# other than a comment. Value of 'token' is set to } for this error.
if (token.type == Type.END_BLOCK and
token.next.type == Type.END_PAREN and
token.next.next.type == Type.SEMICOLON):
current_token = token.next.next.next
removed_tokens = []
while current_token and current_token.line_number == token.line_number:
if current_token.IsAnyType(Type.WHITESPACE,
Type.START_SINGLE_LINE_COMMENT,
Type.COMMENT):
removed_tokens.append(current_token)
current_token = current_token.next
else:
return
elif code == errors.EXTRA_GOOG_REQUIRE:
fixed_tokens = []
while token:
if token.type == Type.IDENTIFIER:
if token.string not in ['goog.require', 'goog.provide']:
# Stop iterating over tokens once we're out of the requires and
# provides.
break
if token.string == 'goog.require':
# Text of form: goog.require('required'), skipping past open paren
# and open quote to the string text.
required = token.next.next.next.string
if required in error.fix_data:
fixed_tokens.append(token)
# Want to delete: goog.require + open paren + open single-quote +
# text + close single-quote + close paren + semi-colon = 7.
tokenutil.DeleteTokens(token, 7)
token = token.next
if removed_tokens:
tokenutil.DeleteTokens(removed_tokens[0], len(removed_tokens))
whitespace_token = Token(' ', Type.WHITESPACE, token.line,
token.line_number)
start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
token.line, token.line_number)
comment_token = Token(' goog.scope', Type.COMMENT, token.line,
token.line_number)
insertion_tokens = [whitespace_token, start_comment_token,
comment_token]
tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
self._AddFix(removed_tokens + insertion_tokens)
elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
tokenutil.DeleteTokens(tokens_in_line[0], len(tokens_in_line))
self._AddFix(tokens_in_line)
elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
is_provide = code == errors.MISSING_GOOG_PROVIDE
is_require = code == errors.MISSING_GOOG_REQUIRE
missing_namespaces = error.fix_data[0]
need_blank_line = error.fix_data[1]
if need_blank_line is None:
# TODO(user): This happens when there are no existing
# goog.provide or goog.require statements to position new statements
# relative to. Consider handling this case with a heuristic.
return
insert_location = token.previous
# If inserting a missing require with no existing requires, insert a
# blank line first.
if need_blank_line and is_require:
tokenutil.InsertBlankLineAfter(insert_location)
insert_location = insert_location.next
for missing_namespace in missing_namespaces:
new_tokens = self._GetNewRequireOrProvideTokens(
is_provide, missing_namespace, insert_location.line_number + 1)
tokenutil.InsertLineAfter(insert_location, new_tokens)
insert_location = new_tokens[-1]
self._AddFix(new_tokens)
# If inserting a missing provide with no existing provides, insert a
# blank line after.
if need_blank_line and is_provide:
tokenutil.InsertBlankLineAfter(insert_location)
def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
"""Returns a list of tokens to create a goog.require/provide statement.
Args:
is_provide: True if getting tokens for a provide, False for require.
namespace: The required or provided namespaces to get tokens for.
line_number: The line number the new require or provide statement will be
on.
Returns:
Tokens to create a new goog.require or goog.provide statement.
"""
string = 'goog.require'
if is_provide:
string = 'goog.provide'
line_text = string + '(\'' + namespace + '\');\n'
return [
Token(string, Type.IDENTIFIER, line_text, line_number),
Token('(', Type.START_PAREN, line_text, line_number),
Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number),
Token(namespace, Type.STRING_TEXT, line_text, line_number),
Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number),
Token(')', Type.END_PAREN, line_text, line_number),
Token(';', Type.SEMICOLON, line_text, line_number)
]
self._AddFix(fixed_tokens)
def FinishFile(self):
"""Called when the current file has finished style checking.
@ -423,7 +311,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
if self._file_fix_count:
f = self._external_file
if not f:
print 'Fixed %d errors in %s' % (self._file_fix_count, self._file_name)
print "Fixed %d errors in %s" % (self._file_fix_count, self._file_name)
f = open(self._file_name, 'w')
token = self._file_token
@ -435,10 +323,11 @@ class ErrorFixer(errorhandler.ErrorHandler):
if token.IsLastInLine():
f.write('\n')
if char_count > 80 and token.line_number in self._file_changed_lines:
print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
print "WARNING: Line %d of %s is now longer than 80 characters." % (
token.line_number, self._file_name)
char_count = 0
self._file_changed_lines
token = token.next

View File

@ -1,65 +0,0 @@
#!/usr/bin/env python
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple, pickle-serializable class to represent a lint error."""
import gflags as flags
from closure_linter import errors
from closure_linter.common import erroroutput
FLAGS = flags.FLAGS
class ErrorRecord(object):
"""Record-keeping struct that can be serialized back from a process.
Attributes:
path: Path to the file.
error_string: Error string for the user.
new_error: Whether this is a "new error" (see errors.NEW_ERRORS).
"""
def __init__(self, path, error_string, new_error):
self.path = path
self.error_string = error_string
self.new_error = new_error
def MakeErrorRecord(path, error):
"""Make an error record with correctly formatted error string.
Errors are not able to be serialized (pickled) over processes because of
their pointers to the complex token/context graph. We use an intermediary
serializable class to pass back just the relevant information.
Args:
path: Path of file the error was found in.
error: An error.Error instance.
Returns:
_ErrorRecord instance.
"""
new_error = error.code in errors.NEW_ERRORS
if FLAGS.unix_mode:
error_string = erroroutput.GetUnixErrorOutput(path, error, new_error)
else:
error_string = erroroutput.GetErrorOutput(error, new_error)
return ErrorRecord(path, error_string, new_error)

View File

@ -19,7 +19,6 @@
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
def ByName(name):
"""Get the error code for the given error name.
@ -58,7 +57,6 @@ LINE_STARTS_WITH_OPERATOR = 120
COMMA_AT_END_OF_LITERAL = 121
MULTI_LINE_STRING = 130
UNNECESSARY_DOUBLE_QUOTED_STRING = 131
UNUSED_PRIVATE_MEMBER = 132
# Requires, provides
GOOG_REQUIRES_NOT_ALPHABETIZED = 140
@ -66,7 +64,6 @@ GOOG_PROVIDES_NOT_ALPHABETIZED = 141
MISSING_GOOG_REQUIRE = 142
MISSING_GOOG_PROVIDE = 143
EXTRA_GOOG_REQUIRE = 144
EXTRA_GOOG_PROVIDE = 145
# JsDoc
INVALID_JSDOC_TAG = 200
@ -92,8 +89,6 @@ UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
INVALID_AUTHOR_TAG_DESCRIPTION = 227
JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
JSDOC_MISSING_OPTIONAL_TYPE = 232
JSDOC_MISSING_OPTIONAL_PREFIX = 233
JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER = 240
# TODO(robbyw): Split this in to more specific syntax problems.
INCORRECT_SUPPRESS_SYNTAX = 250
@ -108,10 +103,6 @@ FILE_IN_BLOCK = 301
INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
# Comments
MISSING_END_OF_SCOPE_COMMENT = 500
MALFORMED_END_OF_SCOPE_COMMENT = 501
# ActionScript specific errors:
# TODO(user): move these errors to their own file and move all JavaScript
# specific errors to their own file as well.
@ -134,9 +125,7 @@ NEW_ERRORS = frozenset([
# Errors added after 2.0.2:
WRONG_INDENTATION,
MISSING_SEMICOLON,
# Errors added after 2.3.4:
MISSING_END_OF_SCOPE_COMMENT,
MALFORMED_END_OF_SCOPE_COMMENT,
UNUSED_PRIVATE_MEMBER,
# Errors added after 2.3.5:
# Errors added after 2.2.5:
WRONG_BLANK_LINE_COUNT,
EXTRA_GOOG_REQUIRE,
])

View File

@ -1,5 +1,4 @@
#!/usr/bin/env python
# python2.6 for command-line runs using p4lib. pylint: disable-msg=C6301
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
@ -26,11 +25,6 @@ from closure_linter import checker
from closure_linter import error_fixer
from closure_linter.common import simplefileflags as fileflags
FLAGS = flags.FLAGS
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
'extensions (not js) that should be treated as '
'JavaScript files.')
def main(argv = None):
"""Main function.
@ -41,11 +35,7 @@ def main(argv = None):
if argv is None:
argv = flags.FLAGS(sys.argv)
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
files = fileflags.GetFileList(argv, 'JavaScript', ['.js'])
style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())

View File

@ -31,223 +31,30 @@ flags.FLAGS.strict = True
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
class FixJsStyleTest(googletest.TestCase):
"""Test case to for gjslint auto-fixing."""
def testFixJsStyle(self):
test_cases = [['fixjsstyle.in.js', 'fixjsstyle.out.js'],
['indentation.js', 'fixjsstyle.indentation.out.js']]
for [running_input_file, running_output_file] in test_cases:
input_filename = None
golden_filename = None
current_filename = None
try:
input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file)
current_filename = input_filename
input_filename = None
try:
input_filename = '%s/fixjsstyle.in.js' % (_RESOURCE_PREFIX)
golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
current_filename = golden_filename
except IOError, ex:
raise IOError('Could not find testdata resource for %s: %s' %
(current_filename, ex))
if running_input_file == 'fixjsstyle.in.js':
with open(input_filename) as f:
for line in f:
# Go to last line.
pass
self.assertTrue(line == line.rstrip(), '%s file should not end '
'with a new line.' % (input_filename))
# Autofix the file, sending output to a fake file.
actual = StringIO.StringIO()
style_checker = checker.JavaScriptStyleChecker(
error_fixer.ErrorFixer(actual))
style_checker.Check(input_filename)
# Now compare the files.
actual.seek(0)
expected = open(golden_filename, 'r')
self.assertEqual(actual.readlines(), expected.readlines())
def testMissingExtraAndUnsortedRequires(self):
"""Tests handling of missing extra and unsorted goog.require statements."""
original = [
"goog.require('dummy.aa');",
"goog.require('dummy.Cc');",
"goog.require('dummy.Dd');",
"",
"var x = new dummy.Bb();",
"dummy.Cc.someMethod();",
"dummy.aa.someMethod();",
]
expected = [
"goog.require('dummy.Bb');",
"goog.require('dummy.Cc');",
"goog.require('dummy.aa');",
"",
"var x = new dummy.Bb();",
"dummy.Cc.someMethod();",
"dummy.aa.someMethod();",
]
self._AssertFixes(original, expected)
def testMissingExtraAndUnsortedProvides(self):
"""Tests handling of missing extra and unsorted goog.provide statements."""
original = [
"goog.provide('dummy.aa');",
"goog.provide('dummy.Cc');",
"goog.provide('dummy.Dd');",
"",
"dummy.Cc = function() {};",
"dummy.Bb = function() {};",
"dummy.aa.someMethod = function();",
]
expected = [
"goog.provide('dummy.Bb');",
"goog.provide('dummy.Cc');",
"goog.provide('dummy.aa');",
"",
"dummy.Cc = function() {};",
"dummy.Bb = function() {};",
"dummy.aa.someMethod = function();",
]
self._AssertFixes(original, expected)
def testNoRequires(self):
"""Tests positioning of missing requires without existing requires."""
original = [
"goog.provide('dummy.Something');",
"",
"dummy.Something = function() {};",
"",
"var x = new dummy.Bb();",
]
expected = [
"goog.provide('dummy.Something');",
"",
"goog.require('dummy.Bb');",
"",
"dummy.Something = function() {};",
"",
"var x = new dummy.Bb();",
]
self._AssertFixes(original, expected)
def testNoProvides(self):
"""Tests positioning of missing provides without existing provides."""
original = [
"goog.require('dummy.Bb');",
"",
"dummy.Something = function() {};",
"",
"var x = new dummy.Bb();",
]
expected = [
"goog.provide('dummy.Something');",
"",
"goog.require('dummy.Bb');",
"",
"dummy.Something = function() {};",
"",
"var x = new dummy.Bb();",
]
self._AssertFixes(original, expected)
def testGoogScopeIndentation(self):
"""Tests Handling a typical end-of-scope indentation fix."""
original = [
'goog.scope(function() {',
' // TODO(brain): Take over the world.',
'}); // goog.scope',
]
expected = [
'goog.scope(function() {',
'// TODO(brain): Take over the world.',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def testMissingEndOfScopeComment(self):
"""Tests Handling a missing comment at end of goog.scope."""
original = [
'goog.scope(function() {',
'});',
]
expected = [
'goog.scope(function() {',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def testMissingEndOfScopeCommentWithOtherComment(self):
"""Tests handling an irrelevant comment at end of goog.scope."""
original = [
'goog.scope(function() {',
"}); // I don't belong here!",
]
expected = [
'goog.scope(function() {',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def testMalformedEndOfScopeComment(self):
"""Tests Handling a malformed comment at end of goog.scope."""
original = [
'goog.scope(function() {',
'}); // goog.scope FTW',
]
expected = [
'goog.scope(function() {',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def _AssertFixes(self, original, expected):
"""Asserts that the error fixer corrects original to expected."""
original = self._GetHeader() + original
expected = self._GetHeader() + expected
golden_filename = '%s/fixjsstyle.out.js' % (_RESOURCE_PREFIX)
except IOError, ex:
raise IOError('Could not find testdata resource for %s: %s' %
(self._filename, ex))
# Autofix the file, sending output to a fake file.
actual = StringIO.StringIO()
style_checker = checker.JavaScriptStyleChecker(
error_fixer.ErrorFixer(actual))
style_checker.CheckLines('testing.js', original, False)
style_checker.Check(input_filename)
# Now compare the files.
actual.seek(0)
expected = open(golden_filename, 'r')
expected = [x + '\n' for x in expected]
self.assertListEqual(actual.readlines(), expected)
def _GetHeader(self):
"""Returns a fake header for a JavaScript file."""
return [
"// Copyright 2011 Google Inc. All Rights Reserved.",
"",
"/**",
" * @fileoverview Fake file overview.",
" * @author fake@google.com (Fake Person)",
" */",
""
]
self.assertEqual(actual.readlines(), expected.readlines())
if __name__ == '__main__':

View File

@ -33,7 +33,6 @@ import unittest as googletest
from closure_linter import checker
from closure_linter import errors
from closure_linter import error_check
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
@ -41,50 +40,36 @@ _RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = True
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
flags.FLAGS.jslint_error = error_check.Rule.ALL
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js')
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
# TODO(user): Figure out how to list the directory.
_TEST_FILES = [
'all_js_wrapped.js',
'blank_lines.js',
'ends_with_block.js',
'externs.js',
'externs_jsdoc.js',
'goog_scope.js',
'html_parse_error.html',
'indentation.js',
'interface.js',
'jsdoc.js',
'limited_doc_checks.js',
'minimal.js',
'other.js',
'provide_blank.js',
'provide_extra.js',
'provide_missing.js',
'require_all_caps.js',
'require_blank.js',
'require_extra.js',
'require_function.js',
'require_function_missing.js',
'require_function_through_both.js',
'require_function_through_namespace.js',
'require_interface.js',
'require_interface_base.js',
'require_lower_case.js',
'require_missing.js',
'require_numeric.js',
'require_provide_blank.js',
'require_provide_ok.js',
'require_provide_missing.js',
'simple.html',
'spaces.js',
'tokenizer.js',
'unparseable.js',
'unused_private_members.js',
'utf8.html'
]

View File

@ -1,5 +1,4 @@
#!/usr/bin/env python
# python2.6 for command-line runs using p4lib. pylint: disable-msg=C6301
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
@ -36,24 +35,15 @@ is in tokenizer.py and checker.py.
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import functools
import itertools
import sys
import time
from closure_linter import checker
from closure_linter import errors
from closure_linter.common import errorprinter
from closure_linter.common import simplefileflags as fileflags
import gflags as flags
from closure_linter import checker
from closure_linter import errorrecord
from closure_linter.common import erroraccumulator
from closure_linter.common import simplefileflags as fileflags
# Attempt import of multiprocessing (should be available in Python 2.6 and up).
try:
# pylint: disable-msg=C6204
import multiprocessing
except ImportError:
multiprocessing = None
FLAGS = flags.FLAGS
flags.DEFINE_boolean('unix_mode', False,
@ -64,144 +54,12 @@ flags.DEFINE_boolean('check_html', False,
'Whether to check javascript in html files.')
flags.DEFINE_boolean('summary', False,
'Whether to show an error count summary.')
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
'extensions (not js) that should be treated as '
'JavaScript files.')
flags.DEFINE_boolean('multiprocess', False,
'Whether to parallalize linting using the '
'multiprocessing module. Disabled by default.')
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
'--check_html', '--summary']
def _MultiprocessCheckPaths(paths):
"""Run _CheckPath over mutltiple processes.
Tokenization, passes, and checks are expensive operations. Running in a
single process, they can only run on one CPU/core. Instead,
shard out linting over all CPUs with multiprocessing to parallelize.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
pool = multiprocessing.Pool()
for results in pool.imap(_CheckPath, paths):
for record in results:
yield record
pool.close()
pool.join()
def _CheckPaths(paths):
"""Run _CheckPath on all paths in one thread.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
for path in paths:
results = _CheckPath(path)
for record in results:
yield record
def _CheckPath(path):
"""Check a path and return any errors.
Args:
path: paths to check.
Returns:
A list of errorrecord.ErrorRecords for any found errors.
"""
error_accumulator = erroraccumulator.ErrorAccumulator()
style_checker = checker.JavaScriptStyleChecker(error_accumulator)
style_checker.Check(path)
# Return any errors as error records.
make_error_record = functools.partial(errorrecord.MakeErrorRecord, path)
return map(make_error_record, error_accumulator.GetErrors())
def _GetFilePaths(argv):
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
return fileflags.GetFileList(argv, 'JavaScript', suffixes)
# Error printing functions
def _PrintFileSummary(paths, records):
"""Print a detailed summary of the number of errors in each file."""
paths = list(paths)
paths.sort()
for path in paths:
path_errors = [e for e in records if e.path == path]
print '%s: %d' % (path, len(path_errors))
def _PrintFileSeparator(path):
print '----- FILE : %s -----' % path
def _PrintSummary(paths, error_records):
"""Print a summary of the number of errors and files."""
error_count = len(error_records)
all_paths = set(paths)
all_paths_count = len(all_paths)
if error_count is 0:
print '%d files checked, no errors found.' % all_paths_count
new_error_count = len([e for e in error_records if e.new_error])
error_paths = set([e.path for e in error_records])
error_paths_count = len(error_paths)
no_error_paths_count = all_paths_count - error_paths_count
if error_count or new_error_count:
print ('Found %d errors, including %d new errors, in %d files '
'(%d files OK).' % (
error_count,
new_error_count,
error_paths_count,
no_error_paths_count))
def _PrintErrorRecords(error_records):
"""Print error records strings in the expected format."""
current_path = None
for record in error_records:
if current_path != record.path:
current_path = record.path
if not FLAGS.unix_mode:
_PrintFileSeparator(current_path)
print record.error_string
def _FormatTime(t):
def FormatTime(t):
"""Formats a duration as a human-readable string.
Args:
@ -224,41 +82,33 @@ def main(argv = None):
"""
if argv is None:
argv = flags.FLAGS(sys.argv)
if FLAGS.time:
start_time = time.time()
start_time = time.time()
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
paths = fileflags.GetFileList(argv, 'JavaScript', suffixes)
files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
if FLAGS.multiprocess:
records_iter = _MultiprocessCheckPaths(paths)
else:
records_iter = _CheckPaths(paths)
error_handler = None
if FLAGS.unix_mode:
error_handler = errorprinter.ErrorPrinter(errors.NEW_ERRORS)
error_handler.SetFormat(errorprinter.UNIX_FORMAT)
records_iter, records_iter_copy = itertools.tee(records_iter, 2)
_PrintErrorRecords(records_iter_copy)
error_records = list(records_iter)
_PrintSummary(paths, error_records)
runner = checker.GJsLintRunner()
result = runner.Run(files, error_handler)
result.PrintSummary()
exit_code = 0
# If there are any errors
if error_records:
if result.HasOldErrors():
exit_code += 1
# If there are any new errors
if [r for r in error_records if r.new_error]:
if result.HasNewErrors():
exit_code += 2
if exit_code:
if FLAGS.summary:
_PrintFileSummary(paths, error_records)
result.PrintFileSummary()
if FLAGS.beep:
# Make a beep noise.
@ -279,10 +129,11 @@ Some of the errors reported by GJsLint may be auto-fixable using the script
fixjsstyle. Please double check any changes it makes and report any bugs. The
script can be run by executing:
fixjsstyle %s """ % ' '.join(fix_args)
fixjsstyle %s
""" % ' '.join(fix_args)
if FLAGS.time:
print 'Done in %s.' % _FormatTime(time.time() - start_time)
print 'Done in %s.' % FormatTime(time.time() - start_time)
sys.exit(exit_code)

View File

@ -152,31 +152,7 @@ class IndentationRules(object):
self._PopTo(Type.START_BRACKET)
elif token_type == Type.END_BLOCK:
start_token = self._PopTo(Type.START_BLOCK)
# Check for required goog.scope comment.
if start_token:
goog_scope = self._GoogScopeOrNone(start_token.token)
if goog_scope is not None:
if not token.line.endswith('; // goog.scope\n'):
if (token.line.find('//') > -1 and
token.line.find('goog.scope') >
token.line.find('//')):
indentation_errors.append([
errors.MALFORMED_END_OF_SCOPE_COMMENT,
('Malformed end of goog.scope comment. Please use the '
'exact following syntax to close the scope:\n'
'}); // goog.scope'),
token,
Position(token.start_index, token.length)])
else:
indentation_errors.append([
errors.MISSING_END_OF_SCOPE_COMMENT,
('Missing comment for end of goog.scope which opened at line '
'%d. End the scope with:\n'
'}); // goog.scope' %
(start_token.line_number)),
token,
Position(token.start_index, token.length)])
self._PopTo(Type.START_BLOCK)
elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
self._Add(self._PopTo(Type.START_BLOCK))
@ -236,7 +212,7 @@ class IndentationRules(object):
elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
self._Add(TokenInfo(token=token, is_block=True))
elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
self._Add(TokenInfo(token=token, is_block=False))
elif token_type == Type.KEYWORD and token.string == 'return':
@ -447,27 +423,6 @@ class IndentationRules(object):
if token.type not in Type.NON_CODE_TYPES:
return False
def _GoogScopeOrNone(self, token):
"""Determines if the given START_BLOCK is part of a goog.scope statement.
Args:
token: A token of type START_BLOCK.
Returns:
The goog.scope function call token, or None if such call doesn't exist.
"""
# Search for a goog.scope statement, which will be 5 tokens before the
# block. Illustration of the tokens found prior to the start block:
# goog.scope(function() {
# 5 4 3 21 ^
maybe_goog_scope = token
for unused_i in xrange(5):
maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
maybe_goog_scope.previous else None)
if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
return maybe_goog_scope
def _Add(self, token_info):
"""Adds the given token info to the stack.
@ -479,7 +434,6 @@ class IndentationRules(object):
return
if token_info.is_block or token_info.token.type == Type.START_PAREN:
token_info.overridden_by = self._GoogScopeOrNone(token_info.token)
index = 1
while index <= len(self._stack):
stack_info = self._stack[-index]

View File

@ -1,6 +1,6 @@
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -24,36 +24,32 @@ __author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import re
from sets import Set
import gflags as flags
from closure_linter import ecmalintrules
from closure_linter import error_check
from closure_linter import errors
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
FLAGS = flags.FLAGS
flags.DEFINE_list('closurized_namespaces', '',
'Namespace prefixes, used for testing of'
'goog.provide/require')
flags.DEFINE_list('ignored_extra_namespaces', '',
'Fully qualified namespaces that should be not be reported '
'as extra by the linter.')
# Shorthand
Error = error.Error
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
"""JavaScript lint rules that catch JavaScript specific style errors."""
def __init__(self, namespaces_info):
"""Initializes a JavaScriptLintRules instance."""
ecmalintrules.EcmaScriptLintRules.__init__(self)
self._namespaces_info = namespaces_info
self._declared_private_member_tokens = {}
self._declared_private_members = Set()
self._used_private_members = Set()
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a param tag."""
self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
@ -64,17 +60,14 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
Args:
token: The token being checked
Returns:
True if the token contains a record type, False otherwise.
"""
# If we see more than one left-brace in the string of an annotation token,
# then there's a record type in there.
return (
token and token.type == Type.DOC_FLAG and
return (token and token.type == Type.DOC_FLAG and
token.attached_object.type is not None and
token.attached_object.type.find('{') != token.string.rfind('{'))
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
@ -92,68 +85,23 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
super(JavaScriptLintRules, self).CheckToken(token, state)
# Store some convenience variables
namespaces_info = self._namespaces_info
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
type = token.type
if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
# Find all assignments to private members.
if token.type == Type.SIMPLE_LVALUE:
identifier = token.string
if identifier.endswith('_') and not identifier.endswith('__'):
doc_comment = state.GetDocComment()
suppressed = (doc_comment and doc_comment.HasFlag('suppress') and
doc_comment.GetFlag('suppress').type == 'underscore')
if not suppressed:
# Look for static members defined on a provided namespace.
namespace = namespaces_info.GetClosurizedNamespace(identifier)
provided_namespaces = namespaces_info.GetProvidedNamespaces()
# Skip cases of this.something_.somethingElse_.
regex = re.compile('^this\.[a-zA-Z_]+$')
if namespace in provided_namespaces or regex.match(identifier):
variable = identifier.split('.')[-1]
self._declared_private_member_tokens[variable] = token
self._declared_private_members.add(variable)
elif not identifier.endswith('__'):
# Consider setting public members of private members to be a usage.
for piece in identifier.split('.'):
if piece.endswith('_'):
self._used_private_members.add(piece)
# Find all usages of private members.
if token.type == Type.IDENTIFIER:
for piece in token.string.split('.'):
if piece.endswith('_'):
self._used_private_members.add(piece)
if token.type == Type.DOC_FLAG:
if type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'param' and flag.name_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.name_token)
if (error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER) and
flag.type is not None and flag.name is not None):
# Check for optional marker in type.
if (flag.type.endswith('=') and
not flag.name.startswith('opt_')):
self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
'Optional parameter name %s must be prefixed '
'with opt_.' % flag.name,
token)
elif (not flag.type.endswith('=') and
flag.name.startswith('opt_')):
self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
'Optional parameter %s type must end with =.' %
flag.name,
token)
if flag.flag_type in state.GetDocFlag().HAS_TYPE:
# Check for both missing type token and empty type braces '{}'
# Missing suppress types are reported separately and we allow enums
# without types.
if (flag.flag_type not in ('suppress', 'enum') and
(not flag.type or flag.type.isspace())):
(flag.type == None or flag.type == '' or flag.type.isspace())):
self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
'Missing type in %s tag' % token.string, token)
@ -164,13 +112,13 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
'Type should be immediately after %s tag' % token.string,
token)
elif token.type == Type.DOUBLE_QUOTE_STRING_START:
next_token = token.next
while next_token.type == Type.STRING_TEXT:
elif type == Type.DOUBLE_QUOTE_STRING_START:
next = token.next
while next.type == Type.STRING_TEXT:
if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
next_token.string):
next.string):
break
next_token = next_token.next
next = next.next
else:
self._HandleError(
errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
@ -178,20 +126,13 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
token,
Position.All(token.string))
elif token.type == Type.END_DOC_COMMENT:
doc_comment = state.GetDocComment()
# When @externs appears in a @fileoverview comment, it should trigger
# the same limited doc checks as a special filename like externs.js.
if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'):
self._SetLimitedDocChecks(True)
if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
not self._is_html and state.InTopLevel() and not state.InBlock()):
elif type == Type.END_DOC_COMMENT:
if (FLAGS.strict and not self._is_html and state.InTopLevel() and
not state.InBlock()):
# Check if we're in a fileoverview or constructor JsDoc.
is_constructor = (
doc_comment.HasFlag('constructor') or
doc_comment = state.GetDocComment()
is_constructor = (doc_comment.HasFlag('constructor') or
doc_comment.HasFlag('interface'))
is_file_overview = doc_comment.HasFlag('fileoverview')
@ -199,16 +140,9 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
# precede some code, skip it.
# NOTE: The tokenutil methods are not used here because of their
# behavior at the top of a file.
next_token = token.next
if (not next_token or
(not is_file_overview and next_token.type in Type.NON_CODE_TYPES)):
return
# Don't require extra blank lines around suppression of extra
# goog.require errors.
if (doc_comment.SuppressionOnly() and
next_token.type == Type.IDENTIFIER and
next_token.string in ['goog.provide', 'goog.require']):
next = token.next
if (not next or
(not is_file_overview and next.type in Type.NON_CODE_TYPES)):
return
# Find the start of this block (include comments above the block, unless
@ -240,20 +174,19 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
error_message = 'Should have a blank line before a file overview.'
expected_blank_lines = 1
elif is_constructor and blank_lines != 3:
error_message = (
'Should have 3 blank lines before a constructor/interface.')
error_message = ('Should have 3 blank lines before a constructor/'
'interface.')
expected_blank_lines = 3
elif not is_file_overview and not is_constructor and blank_lines != 2:
error_message = 'Should have 2 blank lines between top-level blocks.'
expected_blank_lines = 2
if error_message:
self._HandleError(
errors.WRONG_BLANK_LINE_COUNT, error_message,
self._HandleError(errors.WRONG_BLANK_LINE_COUNT, error_message,
block_start, Position.AtBeginning(),
expected_blank_lines - blank_lines)
elif token.type == Type.END_BLOCK:
elif type == Type.END_BLOCK:
if state.InFunction() and state.IsFunctionClose():
is_immediately_called = (token.next and
token.next.type == Type.START_PAREN)
@ -270,9 +203,7 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
errors.MISSING_RETURN_DOCUMENTATION,
'Missing @return JsDoc in function with non-trivial return',
function.doc.end_token, Position.AtBeginning())
elif (not function.has_return and
not function.has_throw and
function.doc and
elif (not function.has_return and function.doc and
function.doc.HasFlag('return') and
not state.InInterfaceMethod()):
return_flag = function.doc.GetFlag('return')
@ -301,7 +232,7 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
'constructor with @constructor)',
function.doc.end_token, Position.AtBeginning())
elif token.type == Type.IDENTIFIER:
elif type == Type.IDENTIFIER:
if token.string == 'goog.inherits' and not state.InFunction():
if state.GetLastNonSpaceToken().line_number == token.line_number:
self._HandleError(
@ -322,71 +253,7 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
# TODO(robbyw): Test the last function was a constructor.
# TODO(robbyw): Test correct @extends and @implements documentation.
elif (token.string == 'goog.provide' and
not state.InFunction() and
namespaces_info is not None):
namespace = tokenutil.Search(token, Type.STRING_TEXT).string
# Report extra goog.provide statement.
if namespaces_info.IsExtraProvide(token):
self._HandleError(
errors.EXTRA_GOOG_PROVIDE,
'Unnecessary goog.provide: ' + namespace,
token, position=Position.AtBeginning())
if namespaces_info.IsLastProvide(token):
# Report missing provide statements after the last existing provide.
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides,
tokenutil.GetLastTokenInSameLine(token).next,
False)
# If there are no require statements, missing requires should be
# reported after the last provide.
if not namespaces_info.GetRequiredNamespaces():
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires,
tokenutil.GetLastTokenInSameLine(token).next,
True)
elif (token.string == 'goog.require' and
not state.InFunction() and
namespaces_info is not None):
namespace = tokenutil.Search(token, Type.STRING_TEXT).string
# If there are no provide statements, missing provides should be
# reported before the first require.
if (namespaces_info.IsFirstRequire(token) and
not namespaces_info.GetProvidedNamespaces()):
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides,
tokenutil.GetFirstTokenInSameLine(token),
True)
# Report extra goog.require statement.
if namespaces_info.IsExtraRequire(token):
self._HandleError(
errors.EXTRA_GOOG_REQUIRE,
'Unnecessary goog.require: ' + namespace,
token, position=Position.AtBeginning())
# Report missing goog.require statements.
if namespaces_info.IsLastRequire(token):
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires,
tokenutil.GetLastTokenInSameLine(token).next,
False)
elif token.type == Type.OPERATOR:
last_in_line = token.IsLastInLine()
elif type == Type.OPERATOR:
# If the token is unary and appears to be used in a unary context
# it's ok. Otherwise, if it's at the end of the line or immediately
# before a comment, it's ok.
@ -403,9 +270,7 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
'Missing space after "%s"' % token.string,
token,
Position.AtEnd(token.string))
elif token.type == Type.WHITESPACE:
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
elif type == Type.WHITESPACE:
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if not last_in_line and not first_in_line and not token.next.IsComment():
@ -419,120 +284,112 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
token,
Position.All(token.string))
def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
"""Reports missing provide statements to the error handler.
Args:
missing_provides: A list of strings where each string is a namespace that
should be provided, but is not.
token: The token where the error was detected (also where the new provides
will be inserted.
need_blank_line: Whether a blank line needs to be inserted after the new
provides are inserted. May be True, False, or None, where None
indicates that the insert location is unknown.
"""
self._HandleError(
errors.MISSING_GOOG_PROVIDE,
'Missing the following goog.provide statements:\n' +
'\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
sorted(missing_provides))),
token, position=Position.AtBeginning(),
fix_data=(missing_provides, need_blank_line))
def _ReportMissingRequires(self, missing_requires, token, need_blank_line):
"""Reports missing require statements to the error handler.
Args:
missing_requires: A list of strings where each string is a namespace that
should be required, but is not.
token: The token where the error was detected (also where the new requires
will be inserted.
need_blank_line: Whether a blank line needs to be inserted before the new
requires are inserted. May be True, False, or None, where None
indicates that the insert location is unknown.
"""
self._HandleError(
errors.MISSING_GOOG_REQUIRE,
'Missing the following goog.require statements:\n' +
'\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
sorted(missing_requires))),
token, position=Position.AtBeginning(),
fix_data=(missing_requires, need_blank_line))
def Finalize(self, state, tokenizer_mode):
"""Perform all checks that need to occur after all lines are processed."""
# Call the base class's Finalize function.
super(JavaScriptLintRules, self).Finalize(state, tokenizer_mode)
if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
# Report an error for any declared private member that was never used.
unused_private_members = (self._declared_private_members -
self._used_private_members)
# Check for sorted requires statements.
goog_require_tokens = state.GetGoogRequireTokens()
requires = [require_token.string for require_token in goog_require_tokens]
sorted_requires = sorted(requires)
index = 0
bad = False
for item in requires:
if item != sorted_requires[index]:
bad = True
break
index += 1
for variable in unused_private_members:
token = self._declared_private_member_tokens[variable]
self._HandleError(errors.UNUSED_PRIVATE_MEMBER,
'Unused private member: %s.' % token.string,
token)
# Clear state to prepare for the next file.
self._declared_private_member_tokens = {}
self._declared_private_members = Set()
self._used_private_members = Set()
namespaces_info = self._namespaces_info
if namespaces_info is not None:
# If there are no provide or require statements, missing provides and
# requires should be reported on line 1.
if (not namespaces_info.GetProvidedNamespaces() and
not namespaces_info.GetRequiredNamespaces()):
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides, state.GetFirstToken(), None)
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires, state.GetFirstToken(), None)
self._CheckSortedRequiresProvides(state.GetFirstToken())
def _CheckSortedRequiresProvides(self, token):
"""Checks that all goog.require and goog.provide statements are sorted.
Note that this method needs to be run after missing statements are added to
preserve alphabetical order.
Args:
token: The first token in the token stream.
"""
sorter = requireprovidesorter.RequireProvideSorter()
provides_result = sorter.CheckProvides(token)
if provides_result:
self._HandleError(
errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
'goog.provide classes must be alphabetized. The correct code is:\n' +
'\n'.join(
map(lambda x: 'goog.provide(\'%s\');' % x, provides_result[1])),
provides_result[0],
position=Position.AtBeginning(),
fix_data=provides_result[0])
requires_result = sorter.CheckRequires(token)
if requires_result:
if bad:
self._HandleError(
errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
'goog.require classes must be alphabetized. The correct code is:\n' +
'\n'.join(
map(lambda x: 'goog.require(\'%s\');' % x, requires_result[1])),
requires_result[0],
'\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
sorted_requires)),
goog_require_tokens[index],
position=Position.AtBeginning(),
fix_data=requires_result[0])
fix_data=goog_require_tokens)
# Check for sorted provides statements.
goog_provide_tokens = state.GetGoogProvideTokens()
provides = [provide_token.string for provide_token in goog_provide_tokens]
sorted_provides = sorted(provides)
index = 0
bad = False
for item in provides:
if item != sorted_provides[index]:
bad = True
break
index += 1
if bad:
self._HandleError(
errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
'goog.provide classes must be alphabetized. The correct code is:\n' +
'\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
sorted_provides)),
goog_provide_tokens[index],
position=Position.AtBeginning(),
fix_data=goog_provide_tokens)
if FLAGS.closurized_namespaces:
# Check that we provide everything we need.
provided_namespaces = state.GetProvidedNamespaces()
missing_provides = provided_namespaces - set(provides)
if missing_provides:
self._HandleError(
errors.MISSING_GOOG_PROVIDE,
'Missing the following goog.provide statements:\n' +
'\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
sorted(missing_provides))),
state.GetFirstToken(), position=Position.AtBeginning(),
fix_data=missing_provides)
# Compose a set of all available namespaces. Explicitly omit goog
# because if you can call goog.require, you already have goog.
available_namespaces = (set(requires) | set(provides) | set(['goog']) |
provided_namespaces)
# Check that we require everything we need.
missing_requires = set()
for namespace_variants in state.GetUsedNamespaces():
# Namespace variants is a list of potential things to require. If we
# find we're missing one, we are lazy and choose to require the first
# in the sequence - which should be the namespace.
if not set(namespace_variants) & available_namespaces:
missing_requires.add(namespace_variants[0])
if missing_requires:
self._HandleError(
errors.MISSING_GOOG_REQUIRE,
'Missing the following goog.require statements:\n' +
'\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
sorted(missing_requires))),
state.GetFirstToken(), position=Position.AtBeginning(),
fix_data=missing_requires)
# Check that we don't require things we don't actually use.
namespace_variants = state.GetUsedNamespaces()
used_namespaces = set()
for a, b in namespace_variants:
used_namespaces.add(a)
used_namespaces.add(b)
extra_requires = set()
for i in requires:
baseNamespace = i.split('.')[0]
if (i not in used_namespaces and
baseNamespace in FLAGS.closurized_namespaces and
i not in FLAGS.ignored_extra_namespaces):
extra_requires.add(i)
if extra_requires:
self._HandleError(
errors.EXTRA_GOOG_REQUIRE,
'The following goog.require statements appear unnecessary:\n' +
'\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
sorted(extra_requires))),
state.GetFirstToken(), position=Position.AtBeginning(),
fix_data=extra_requires)
def GetLongLineExceptions(self):
  """Gets a list of regexps for lines which can be longer than the limit.

  Returns:
    A list of compiled regular expressions matching goog.require and
    goog.provide statements, which are exempt from the line-length limit
    because they cannot be wrapped.
  """
  # Use raw strings so '\.' and '\(' reach the regex engine verbatim rather
  # than being treated as (invalid/deprecated) string escape sequences.
  return [
      re.compile(r'goog\.require\(.+\);?\s*$'),
      re.compile(r'goog\.provide\(.+\);?\s*$')
      ]

View File

@ -50,7 +50,7 @@ class JsDocFlag(statetracker.DocFlag):
# TODO(robbyw): determine which of these, if any, should be illegal.
EXTENDED_DOC = frozenset([
'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link',
'meaning', 'protected', 'notypecheck', 'throws'])
'protected', 'notypecheck', 'throws'])
LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC
@ -70,9 +70,24 @@ class JavaScriptStateTracker(statetracker.StateTracker):
functionality needed for JavaScript.
"""
def __init__(self):
"""Initializes a JavaScript token stream state tracker."""
def __init__(self, closurized_namespaces=''):
"""Initializes a JavaScript token stream state tracker.
Args:
closurized_namespaces: An optional list of namespace prefixes used for
testing of goog.provide/require.
"""
statetracker.StateTracker.__init__(self, JsDocFlag)
self.__closurized_namespaces = closurized_namespaces
def Reset(self):
"""Resets the state tracker to prepare for processing a new page."""
super(JavaScriptStateTracker, self).Reset()
self.__goog_require_tokens = []
self.__goog_provide_tokens = []
self.__provided_namespaces = set()
self.__used_namespaces = []
def InTopLevel(self):
"""Compute whether we are at the top level in the class.
@ -87,6 +102,22 @@ class JavaScriptStateTracker(statetracker.StateTracker):
"""
return not self.InParentheses()
def GetGoogRequireTokens(self):
"""Returns list of require tokens."""
return self.__goog_require_tokens
def GetGoogProvideTokens(self):
"""Returns list of provide tokens."""
return self.__goog_provide_tokens
def GetProvidedNamespaces(self):
"""Returns list of provided namespaces."""
return self.__provided_namespaces
def GetUsedNamespaces(self):
"""Returns list of used namespaces, is a list of sequences."""
return self.__used_namespaces
def GetBlockType(self, token):
"""Determine the block type given a START_BLOCK token.
@ -114,3 +145,94 @@ class JavaScriptStateTracker(statetracker.StateTracker):
"""
super(JavaScriptStateTracker, self).HandleToken(token,
last_non_space_token)
if token.IsType(Type.IDENTIFIER):
if token.string == 'goog.require':
class_token = tokenutil.Search(token, Type.STRING_TEXT)
self.__goog_require_tokens.append(class_token)
elif token.string == 'goog.provide':
class_token = tokenutil.Search(token, Type.STRING_TEXT)
self.__goog_provide_tokens.append(class_token)
elif self.__closurized_namespaces:
self.__AddUsedNamespace(token.string)
if token.IsType(Type.SIMPLE_LVALUE) and not self.InFunction():
identifier = token.values['identifier']
if self.__closurized_namespaces:
namespace = self.GetClosurizedNamespace(identifier)
if namespace and identifier == namespace:
self.__provided_namespaces.add(namespace)
if (self.__closurized_namespaces and
token.IsType(Type.DOC_FLAG) and
token.attached_object.flag_type == 'implements'):
# Interfaces should be goog.require'd.
doc_start = tokenutil.Search(token, Type.DOC_START_BRACE)
interface = tokenutil.Search(doc_start, Type.COMMENT)
self.__AddUsedNamespace(interface.string)
def __AddUsedNamespace(self, identifier):
  """Adds the namespace of an identifier to the list of used namespaces.

  Args:
    identifier: An identifier which has been used.
  """
  namespace = self.GetClosurizedNamespace(identifier)
  if namespace:
    # Record both the namespace and the full identifier: either one being
    # goog.provide'd elsewhere could satisfy this dependency.
    self.__used_namespaces.append([namespace, identifier])
def GetClosurizedNamespace(self, identifier):
  """Given an identifier, returns the namespace that identifier is from.

  Args:
    identifier: The identifier to extract a namespace from.

  Returns:
    The namespace the given identifier resides in, or None if one could not
    be found.
  """
  parts = identifier.split('.')
  for part in parts:
    if part.endswith('_'):
      # Ignore private variables / inner classes.
      return None
  if identifier.startswith('goog.global'):
    # Ignore goog.global, since it is, by definition, global.
    return None
  for namespace in self.__closurized_namespaces:
    if identifier.startswith(namespace + '.'):
      last_part = parts[-1]
      if not last_part:
        # TODO(robbyw): Handle this: it's a multi-line identifier.
        return None
      if last_part in ('apply', 'inherits', 'call'):
        # Calling one of Function's methods usually indicates use of a
        # superclass.
        parts.pop()
        last_part = parts[-1]
      # Walk the dotted parts looking for the first segment that marks the
      # end of a namespace: an all-caps enum/constant or 'prototype'.
      for i in xrange(1, len(parts)):
        part = parts[i]
        if part.isupper():
          # If an identifier is of the form foo.bar.BAZ.x or foo.bar.BAZ,
          # the namespace is foo.bar.
          return '.'.join(parts[:i])
        if part == 'prototype':
          # If an identifier is of the form foo.bar.prototype.x, the
          # namespace is foo.bar.
          return '.'.join(parts[:i])
      if last_part.isupper() or not last_part[0].isupper():
        # Strip off the last part of an enum or constant reference.
        parts.pop()
      return '.'.join(parts)
  # Identifier does not start with any closurized namespace prefix.
  return None

View File

@ -0,0 +1,53 @@
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for JavaScriptStateTracker."""
import unittest as googletest
from closure_linter import javascriptstatetracker
class JavaScriptStateTrackerTest(googletest.TestCase):
  """Tests for JavaScriptStateTracker.GetClosurizedNamespace."""

  # Maps an identifier to the namespace it is expected to resolve to, or
  # None when no namespace should be extracted (e.g. private members).
  __test_cases = {
      'package.CONSTANT': 'package',
      'package.methodName': 'package',
      'package.subpackage.methodName': 'package.subpackage',
      'package.ClassName.something': 'package.ClassName',
      'package.ClassName.Enum.VALUE.methodName': 'package.ClassName.Enum',
      'package.ClassName.CONSTANT': 'package.ClassName',
      'package.ClassName.inherits': 'package.ClassName',
      'package.ClassName.apply': 'package.ClassName',
      'package.ClassName.methodName.apply': 'package.ClassName',
      'package.ClassName.methodName.call': 'package.ClassName',
      'package.ClassName.prototype.methodName': 'package.ClassName',
      'package.ClassName.privateMethod_': None,
      'package.ClassName.prototype.methodName.apply': 'package.ClassName'
  }

  def testGetClosurizedNamespace(self):
    tracker = javascriptstatetracker.JavaScriptStateTracker(['package'])
    for ident, expected in self.__test_cases.items():
      actual = tracker.GetClosurizedNamespace(ident)
      message = ('expected namespace "' + str(expected) +
                 '" for identifier "' + str(ident) + '" but was "' +
                 str(actual) + '"')
      self.assertEqual(expected, actual, message)
if __name__ == '__main__':
  # Run the unit tests when this module is executed directly.
  googletest.main()

View File

@ -51,7 +51,7 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
"""
# Useful patterns for JavaScript parsing.
IDENTIFIER_CHAR = r'A-Za-z0-9_$.'
IDENTIFIER_CHAR = r'A-Za-z0-9_$.';
# Number patterns based on:
# http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html
@ -201,9 +201,7 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG),
Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG,
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE),
# Encountering a doc flag should leave lex spaces mode.
Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE),
Matcher(DOC_FLAG, Type.DOC_FLAG),
# Tokenize braces so we can find types.
Matcher(START_BLOCK, Type.DOC_START_BRACE),
@ -216,112 +214,116 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
# returned. Hence the order is important because the matchers that come first
# overrule the matchers that come later.
JAVASCRIPT_MATCHERS = {
# Matchers for basic text mode.
JavaScriptModes.TEXT_MODE: [
# Check a big group - strings, starting comments, and regexes - all
# of which could be intertwined. 'string with /regex/',
# /regex with 'string'/, /* comment with /regex/ and string */ (and so
# on)
Matcher(START_DOC_COMMENT, Type.START_DOC_COMMENT,
JavaScriptModes.DOC_COMMENT_MODE),
Matcher(START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
JavaScriptModes.BLOCK_COMMENT_MODE),
Matcher(END_OF_LINE_SINGLE_LINE_COMMENT,
Type.START_SINGLE_LINE_COMMENT),
Matcher(START_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT,
JavaScriptModes.LINE_COMMENT_MODE),
Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
Matcher(REGEX, Type.REGEX),
# Matchers for basic text mode.
JavaScriptModes.TEXT_MODE: [
# Check a big group - strings, starting comments, and regexes - all
# of which could be intertwined. 'string with /regex/',
# /regex with 'string'/, /* comment with /regex/ and string */ (and so on)
Matcher(START_DOC_COMMENT, Type.START_DOC_COMMENT,
JavaScriptModes.DOC_COMMENT_MODE),
Matcher(START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
JavaScriptModes.BLOCK_COMMENT_MODE),
Matcher(END_OF_LINE_SINGLE_LINE_COMMENT,
Type.START_SINGLE_LINE_COMMENT),
Matcher(START_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT,
JavaScriptModes.LINE_COMMENT_MODE),
Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
Matcher(REGEX, Type.REGEX),
# Next we check for start blocks appearing outside any of the items
# above.
Matcher(START_BLOCK, Type.START_BLOCK),
Matcher(END_BLOCK, Type.END_BLOCK),
# Next we check for start blocks appearing outside any of the items above.
Matcher(START_BLOCK, Type.START_BLOCK),
Matcher(END_BLOCK, Type.END_BLOCK),
# Then we search for function declarations.
Matcher(FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
JavaScriptModes.FUNCTION_MODE),
# Then we search for function declarations.
Matcher(FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
JavaScriptModes.FUNCTION_MODE),
# Next, we convert non-function related parens to tokens.
Matcher(OPENING_PAREN, Type.START_PAREN),
Matcher(CLOSING_PAREN, Type.END_PAREN),
# Next, we convert non-function related parens to tokens.
Matcher(OPENING_PAREN, Type.START_PAREN),
Matcher(CLOSING_PAREN, Type.END_PAREN),
# Next, we convert brackets to tokens.
Matcher(OPENING_BRACKET, Type.START_BRACKET),
Matcher(CLOSING_BRACKET, Type.END_BRACKET),
# Next, we convert brackets to tokens.
Matcher(OPENING_BRACKET, Type.START_BRACKET),
Matcher(CLOSING_BRACKET, Type.END_BRACKET),
# Find numbers. This has to happen before operators because scientific
# notation numbers can have + and - in them.
Matcher(NUMBER, Type.NUMBER),
# Find numbers. This has to happen before operators because scientific
# notation numbers can have + and - in them.
Matcher(NUMBER, Type.NUMBER),
# Find operators and simple assignments
Matcher(SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
Matcher(OPERATOR, Type.OPERATOR),
# Find operators and simple assignments
Matcher(SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
Matcher(OPERATOR, Type.OPERATOR),
# Find key words and whitespace.
Matcher(KEYWORD, Type.KEYWORD),
Matcher(WHITESPACE, Type.WHITESPACE),
# Find key words and whitespace
Matcher(KEYWORD, Type.KEYWORD),
Matcher(WHITESPACE, Type.WHITESPACE),
# Find identifiers.
Matcher(IDENTIFIER, Type.IDENTIFIER),
# Find identifiers
Matcher(IDENTIFIER, Type.IDENTIFIER),
# Finally, we convert semicolons to tokens.
Matcher(SEMICOLON, Type.SEMICOLON)],
# Finally, we convert semicolons to tokens.
Matcher(SEMICOLON, Type.SEMICOLON)],
# Matchers for single quote strings.
JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
Matcher(SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
JavaScriptModes.TEXT_MODE)],
# Matchers for double quote strings.
JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
Matcher(DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
JavaScriptModes.TEXT_MODE)],
# Matchers for single quote strings.
JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
Matcher(SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
JavaScriptModes.TEXT_MODE)],
# Matchers for block comments.
JavaScriptModes.BLOCK_COMMENT_MODE: [
# First we check for exiting a block comment.
Matcher(END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
JavaScriptModes.TEXT_MODE),
# Match non-comment-ending text..
Matcher(BLOCK_COMMENT_TEXT, Type.COMMENT)],
# Matchers for double quote strings.
JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
Matcher(DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
JavaScriptModes.TEXT_MODE)],
# Matchers for doc comments.
JavaScriptModes.DOC_COMMENT_MODE: COMMON_DOC_MATCHERS + [
Matcher(DOC_COMMENT_TEXT, Type.COMMENT)],
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: COMMON_DOC_MATCHERS + [
Matcher(WHITESPACE, Type.COMMENT),
Matcher(DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
# Matchers for block comments.
JavaScriptModes.BLOCK_COMMENT_MODE: [
# First we check for exiting a block comment.
Matcher(END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
JavaScriptModes.TEXT_MODE),
# Matchers for single line comments.
JavaScriptModes.LINE_COMMENT_MODE: [
# We greedy match until the end of the line in line comment mode.
Matcher(ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
# Match non-comment-ending text..
Matcher(BLOCK_COMMENT_TEXT, Type.COMMENT)],
# Matchers for code after the function keyword.
JavaScriptModes.FUNCTION_MODE: [
# Must match open paren before anything else and move into parameter
# mode, otherwise everything inside the parameter list is parsed
# incorrectly.
Matcher(OPENING_PAREN, Type.START_PARAMETERS,
JavaScriptModes.PARAMETER_MODE),
Matcher(WHITESPACE, Type.WHITESPACE),
Matcher(IDENTIFIER, Type.FUNCTION_NAME)],
# Matchers for function parameters
JavaScriptModes.PARAMETER_MODE: [
# When in function parameter mode, a closing paren is treated specially.
# Everything else is treated as lines of parameters.
Matcher(CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
JavaScriptModes.TEXT_MODE),
Matcher(PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]}
# Matchers for doc comments.
JavaScriptModes.DOC_COMMENT_MODE: COMMON_DOC_MATCHERS + [
Matcher(DOC_COMMENT_TEXT, Type.COMMENT)],
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: COMMON_DOC_MATCHERS + [
Matcher(WHITESPACE, Type.COMMENT),
Matcher(DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
# Matchers for single line comments.
JavaScriptModes.LINE_COMMENT_MODE: [
# We greedy match until the end of the line in line comment mode.
Matcher(ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
# Matchers for code after the function keyword.
JavaScriptModes.FUNCTION_MODE: [
# Must match open paren before anything else and move into parameter mode,
# otherwise everything inside the parameter list is parsed incorrectly.
Matcher(OPENING_PAREN, Type.START_PARAMETERS,
JavaScriptModes.PARAMETER_MODE),
Matcher(WHITESPACE, Type.WHITESPACE),
Matcher(IDENTIFIER, Type.FUNCTION_NAME)],
# Matchers for function parameters
JavaScriptModes.PARAMETER_MODE: [
# When in function parameter mode, a closing paren is treated specially.
# Everything else is treated as lines of parameters.
Matcher(CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
JavaScriptModes.TEXT_MODE),
Matcher(PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]}
# When text is not matched, it is given this default type based on mode.
# If unspecified in this map, the default default is Type.NORMAL.

View File

@ -1,74 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --nostrict.
Tests errors that can be thrown by gjslint when not in strict mode.
"""
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import checker
from closure_linter import errors
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = False
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
_TEST_FILES = [
'not_strict.js'
]
class GJsLintTestSuite(unittest.TestSuite):
  """Test suite to run a GJsLintTest for each of several files.

  If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
  testdata to test. Otherwise, _TEST_FILES is used.
  """

  def __init__(self, tests=()):
    unittest.TestSuite.__init__(self, tests)
    # Filenames passed on the command line override the default file list.
    files_to_test = sys.argv[1:] if sys.argv else []
    if not files_to_test:
      files_to_test = _TEST_FILES
    for name in files_to_test:
      path = os.path.join(_RESOURCE_PREFIX, name)
      self.addTest(
          filetestcase.AnnotatedFileTestCase(path, checker.GJsLintRunner(),
                                             errors.ByName))
if __name__ == '__main__':
  # Don't let main parse args; it happens in the TestSuite.
  googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')

View File

@ -1,272 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains logic for sorting goog.provide and goog.require statements.
Closurized JavaScript files use goog.provide and goog.require statements at the
top of the file to manage dependencies. These statements should be sorted
alphabetically, however, it is common for them to be accompanied by inline
comments or suppression annotations. In order to sort these statements without
disrupting their comments and annotations, the association between statements
and comments/annotations must be maintained while sorting.
RequireProvideSorter: Handles checking/fixing of provide/require statements.
"""
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class RequireProvideSorter(object):
  """Checks for and fixes alphabetization of provide and require statements.

  When alphabetizing, comments on the same line or comments directly above a
  goog.provide or goog.require statement are associated with that statement and
  stay with the statement as it gets sorted.
  """

  def CheckProvides(self, token):
    """Checks alphabetization of goog.provide statements.

    Iterates over tokens in given token stream, identifies goog.provide tokens,
    and checks that they occur in alphabetical order by the object being
    provided.

    Args:
      token: A token in the token stream before any goog.provide tokens.

    Returns:
      A tuple containing the first provide token in the token stream and a list
      of provided objects sorted alphabetically. For example:

      (JavaScriptToken, ['object.a', 'object.b', ...])

      None is returned if all goog.provide statements are already sorted.
    """
    provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
    provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
    sorted_provide_strings = sorted(provide_strings)
    if provide_strings != sorted_provide_strings:
      # NOTE(review): returns a list here while CheckRequires returns a
      # tuple -- confirm callers accept both shapes.
      return [provide_tokens[0], sorted_provide_strings]
    return None

  def CheckRequires(self, token):
    """Checks alphabetization of goog.require statements.

    Iterates over tokens in given token stream, identifies goog.require tokens,
    and checks that they occur in alphabetical order by the dependency being
    required.

    Args:
      token: A token in the token stream before any goog.require tokens.

    Returns:
      A tuple containing the first require token in the token stream and a list
      of required dependencies sorted alphabetically. For example:

      (JavaScriptToken, ['object.a', 'object.b', ...])

      None is returned if all goog.require statements are already sorted.
    """
    require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
    require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
    sorted_require_strings = sorted(require_strings)
    if require_strings != sorted_require_strings:
      return (require_tokens[0], sorted_require_strings)
    return None

  def FixProvides(self, token):
    """Sorts goog.provide statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.provide'))

  def FixRequires(self, token):
    """Sorts goog.require statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.require'))

  def _FixProvidesOrRequires(self, tokens):
    """Sorts goog.provide or goog.require statements.

    Args:
      tokens: A list of goog.provide or goog.require tokens in the order they
              appear in the token stream. i.e. the first token in this list must
              be the first goog.provide or goog.require token.
    """
    strings = self._GetRequireOrProvideTokenStrings(tokens)
    sorted_strings = sorted(strings)
    # Make a separate pass to remove any blank lines between goog.require/
    # goog.provide tokens.
    first_token = tokens[0]
    last_token = tokens[-1]
    i = last_token
    # Walk backwards so deleting a token never disturbs the part of the
    # stream we have yet to visit.
    while i != first_token:
      if i.type is Type.BLANK_LINE:
        tokenutil.DeleteToken(i)
      i = i.previous
    # A map from required/provided object name to tokens that make up the line
    # it was on, including any comments immediately before it or after it on the
    # same line.
    tokens_map = self._GetTokensMap(tokens)
    # Iterate over the map removing all tokens.
    for name in tokens_map:
      tokens_to_delete = tokens_map[name]
      for i in tokens_to_delete:
        tokenutil.DeleteToken(i)
    # Re-add all tokens in the map in alphabetical order.
    insert_after = tokens[0].previous
    for string in sorted_strings:
      for i in tokens_map[string]:
        tokenutil.InsertTokenAfter(i, insert_after)
        insert_after = i

  def _GetRequireOrProvideTokens(self, token, token_string):
    """Gets all goog.provide or goog.require tokens in the given token stream.

    Args:
      token: The first token in the token stream.
      token_string: One of 'goog.provide' or 'goog.require' to indicate which
                    tokens to find.

    Returns:
      A list of goog.provide or goog.require tokens in the order they appear in
      the token stream.
    """
    tokens = []
    while token:
      if token.type == Type.IDENTIFIER:
        if token.string == token_string:
          tokens.append(token)
        elif token.string not in ['goog.require', 'goog.provide']:
          # The goog.provide and goog.require identifiers are at the top of the
          # file. So if any other identifier is encountered, return.
          break
      token = token.next
    return tokens

  def _GetRequireOrProvideTokenStrings(self, tokens):
    """Gets a list of strings corresponding to the given list of tokens.

    The string will be the next string in the token stream after each token in
    tokens. This is used to find the object being provided/required by a given
    goog.provide or goog.require token.

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A list of object names that are being provided or required by the given
      list of tokens. For example:

      ['object.a', 'object.c', 'object.b']
    """
    token_strings = []
    for token in tokens:
      # The STRING_TEXT token following goog.require/provide holds the name.
      name = tokenutil.Search(token, Type.STRING_TEXT).string
      token_strings.append(name)
    return token_strings

  def _GetTokensMap(self, tokens):
    """Gets a map from object name to tokens associated with that object.

    Starting from the goog.provide/goog.require token, searches backwards in the
    token stream for any lines that start with a comment. These lines are
    associated with the goog.provide/goog.require token. Also associates any
    tokens on the same line as the goog.provide/goog.require token with that
    token.

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A dictionary that maps object names to the tokens associated with the
      goog.provide or goog.require of that object name. For example:

      {
        'object.a': [JavaScriptToken, JavaScriptToken, ...],
        'object.b': [...]
      }

      The list of tokens includes any comment lines above the goog.provide or
      goog.require statement and everything after the statement on the same
      line. For example, all of the following would be associated with
      'object.a':

      /** @suppress {extraRequire} */
      goog.require('object.a'); // Some comment.
    """
    tokens_map = {}
    for token in tokens:
      object_name = tokenutil.Search(token, Type.STRING_TEXT).string
      # If the previous line starts with a comment, presume that the comment
      # relates to the goog.require or goog.provide and keep them together when
      # sorting.
      first_token = token
      previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
      while previous_first_token.IsAnyType(Type.COMMENT_TYPES):
        first_token = previous_first_token
        previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
            first_token)
      # Find the last token on the line.
      last_token = tokenutil.GetLastTokenInSameLine(token)
      all_tokens = self._GetTokenList(first_token, last_token)
      tokens_map[object_name] = all_tokens
    return tokens_map

  def _GetTokenList(self, first_token, last_token):
    """Gets a list of all tokens from first_token to last_token, inclusive.

    Args:
      first_token: The first token to get.
      last_token: The last token to get.

    Returns:
      A list of all tokens between first_token and last_token, including both
      first_token and last_token.

    Raises:
      Exception: If the token stream ends before last_token is reached.
    """
    token_list = []
    token = first_token
    while token != last_token:
      if not token:
        raise Exception('ran out of tokens')
      token_list.append(token)
      token = token.next
    token_list.append(last_token)
    return token_list

View File

@ -1,74 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for RequireProvideSorter."""
import unittest as googletest
from closure_linter import ecmametadatapass
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
# pylint: disable-msg=C6409
TokenType = javascripttokens.JavaScriptTokenType
class RequireProvideSorterTest(googletest.TestCase):
  """Tests for RequireProvideSorter."""

  _tokenizer = javascripttokenizer.JavaScriptTokenizer()
  _metadata_pass = ecmametadatapass.EcmaMetaDataPass()

  def testFixRequires_removeBlankLines(self):
    """Tests that blank lines are omitted in sorted goog.require statements."""
    input_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassB\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');'
    ]
    expected_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');',
        'goog.require(\'package.subpackage.ClassB\');'
    ]
    start_token = self._tokenizer.TokenizeFile(input_lines)
    self._metadata_pass.Reset()
    self._metadata_pass.Process(start_token)
    fixer = requireprovidesorter.RequireProvideSorter()
    fixer.FixRequires(start_token)
    self.assertEquals(expected_lines, self._GetLines(start_token))

  def _GetLines(self, token):
    """Returns an array of lines built from the specified token stream."""
    lines = []
    current_line = []
    while token:
      current_line.append(token.string)
      if token.IsLastInLine():
        lines.append(''.join(current_line))
        current_line = []
      token = token.next
    return lines
if __name__ == '__main__':
  # Run the unit tests when this module is executed directly.
  googletest.main()

View File

@ -65,7 +65,6 @@ class DocFlag(object):
'implements',
'implicitCast',
'interface',
'lends',
'license',
'noalias',
'nocompile',
@ -90,38 +89,18 @@ class DocFlag(object):
# Includes all Closure Compiler @suppress types.
# Not all of these annotations are interpreted by Closure Linter.
#
# Specific cases:
# - accessControls is supported by the compiler at the expression
# and method level to suppress warnings about private/protected
# access (method level applies to all references in the method).
# The linter mimics the compiler behavior.
SUPPRESS_TYPES = frozenset([
'accessControls',
'ambiguousFunctionDecl',
'checkRegExp',
'checkTypes',
'checkVars',
'const',
'constantProperty',
'deprecated',
'duplicate',
'es5Strict',
'externsValidation',
'extraProvide',
'extraRequire',
'fileoverviewTags',
'globalThis',
'internetExplorerChecks',
'invalidCasts',
'missingProperties',
'missingProvide',
'missingRequire',
'nonStandardJsDocs',
'strictModuleDepCheck',
'tweakValidation',
'typeInvalidation',
'undefinedNames',
'undefinedVars',
'underscore',
'unknownDefines',
@ -270,15 +249,7 @@ class DocComment(object):
[Type.DOC_FLAG])
if brace:
end_token, contents = _GetMatchingEndBraceAndContents(brace)
for suppression in contents.split('|'):
self.suppressions[suppression] = token
def SuppressionOnly(self):
"""Returns whether this comment contains only suppression flags."""
for flag_type in self.__flags.keys():
if flag_type != 'suppress':
return False
return True
self.suppressions[contents] = token
def AddFlag(self, flag):
"""Add a new document flag.
@ -294,7 +265,10 @@ class DocComment(object):
Returns:
True if documentation may be pulled off the superclass.
"""
return self.HasFlag('inheritDoc') or self.HasFlag('override')
return (self.HasFlag('inheritDoc') or
(self.HasFlag('override') and
not self.HasFlag('return') and
not self.HasFlag('param')))
def HasFlag(self, flag_type):
"""Test if the given flag has been set.
@ -481,8 +455,7 @@ def _GetEndTokenAndContents(start_token):
last_line = iterator.line_number
last_token = None
contents = ''
doc_depth = 0
while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
while not iterator.type in Type.FLAG_ENDING_TYPES:
if (iterator.IsFirstInLine() and
DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
# If we have a blank comment line, consider that an implicit
@ -497,17 +470,6 @@ def _GetEndTokenAndContents(start_token):
# only a doc comment prefix or whitespace.
break
# b/2983692
# don't prematurely match against a @flag if inside a doc flag
# need to think about what is the correct behavior for unterminated
# inline doc flags
if (iterator.type == Type.DOC_START_BRACE and
iterator.next.type == Type.DOC_INLINE_FLAG):
doc_depth += 1
elif (iterator.type == Type.DOC_END_BRACE and
doc_depth > 0):
doc_depth -= 1
if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
contents += iterator.string
last_token = iterator
@ -547,7 +509,6 @@ class Function(object):
self.is_constructor = doc and doc.HasFlag('constructor')
self.is_interface = doc and doc.HasFlag('interface')
self.has_return = False
self.has_throw = False
self.has_this = False
self.name = name
self.doc = doc
@ -932,11 +893,6 @@ class StateTracker(object):
if function:
function.has_return = True
elif type == Type.KEYWORD and token.string == 'throw':
function = self.GetFunction()
if function:
function.has_throw = True
elif type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
jsdoc = self.GetDocComment()

View File

@ -19,16 +19,15 @@
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import copy
from closure_linter import javascripttokens
from closure_linter.common import tokens
from closure_linter import javascripttokens
import copy
# Shorthand
JavaScriptToken = javascripttokens.JavaScriptToken
Type = tokens.TokenType
def GetFirstTokenInSameLine(token):
"""Returns the first token in the same line as token.
@ -43,58 +42,6 @@ def GetFirstTokenInSameLine(token):
return token
def GetFirstTokenInPreviousLine(token):
"""Returns the first token in the previous line as token.
Args:
token: Any token in the line.
Returns:
The first token in the previous line as token, or None if token is on the
first line.
"""
first_in_line = GetFirstTokenInSameLine(token)
if first_in_line.previous:
return GetFirstTokenInSameLine(first_in_line.previous)
return None
def GetLastTokenInSameLine(token):
"""Returns the last token in the same line as token.
Args:
token: Any token in the line.
Returns:
The last token in the same line as token.
"""
while not token.IsLastInLine():
token = token.next
return token
def GetAllTokensInSameLine(token):
"""Returns all tokens in the same line as the given token.
Args:
token: Any token in the line.
Returns:
All tokens on the same line as the given token.
"""
first_token = GetFirstTokenInSameLine(token)
last_token = GetLastTokenInSameLine(token)
tokens_in_line = []
while first_token != last_token:
tokens_in_line.append(first_token)
first_token = first_token.next
tokens_in_line.append(last_token)
return tokens_in_line
def CustomSearch(start_token, func, end_func=None, distance=None,
reverse=False):
"""Returns the first token where func is True within distance of this token.
@ -130,14 +77,14 @@ def CustomSearch(start_token, func, end_func=None, distance=None,
else:
while token and (distance is None or distance > 0):
next_token = token.next
if next_token:
if func(next_token):
return next_token
if end_func and end_func(next_token):
next = token.next
if next:
if func(next):
return next
if end_func and end_func(next):
return None
token = next_token
token = next
if distance is not None:
distance -= 1
@ -176,6 +123,7 @@ def SearchExcept(start_token, token_types, distance=None, reverse=False):
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token of any type in token_types within distance of this token, or
None if no such token is found.
@ -225,21 +173,19 @@ def DeleteToken(token):
following_token.metadata.last_code = token.metadata.last_code
following_token = following_token.next
def DeleteTokens(token, token_count):
def DeleteTokens(token, tokenCount):
"""Deletes the given number of tokens starting with the given token.
Args:
token: The token to start deleting at.
token_count: The total number of tokens to delete.
tokenCount: The total number of tokens to delete.
"""
for i in xrange(1, token_count):
for i in xrange(1, tokenCount):
DeleteToken(token.next)
DeleteToken(token)
def InsertTokenAfter(new_token, token):
"""Insert new_token after token.
"""Insert new_token after token
Args:
new_token: A token to be added to the stream
@ -275,21 +221,6 @@ def InsertTokenAfter(new_token, token):
iterator = iterator.next
def InsertTokensAfter(new_tokens, token):
"""Insert multiple tokens after token.
Args:
new_tokens: An array of tokens to be added to the stream
token: A token already in the stream
"""
# TODO(user): It would be nicer to have InsertTokenAfter defer to here
# instead of vice-versa.
current_token = token
for new_token in new_tokens:
InsertTokenAfter(new_token, current_token)
current_token = new_token
def InsertSpaceTokenAfter(token):
"""Inserts a space token after the given token.
@ -297,44 +228,28 @@ def InsertSpaceTokenAfter(token):
token: The token to insert a space token after
Returns:
A single space token
"""
A single space token"""
space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
token.line_number)
InsertTokenAfter(space_token, token)
def InsertBlankLineAfter(token):
def InsertLineAfter(token):
"""Inserts a blank line after the given token.
Args:
token: The token to insert a blank line after
Returns:
A single space token
"""
A single space token"""
blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
token.line_number + 1)
InsertLineAfter(token, [blank_token])
def InsertLineAfter(token, new_tokens):
"""Inserts a new line consisting of new_tokens after the given token.
Args:
token: The token to insert after.
new_tokens: The tokens that will make up the new line.
"""
insert_location = token
for new_token in new_tokens:
InsertTokenAfter(new_token, insert_location)
insert_location = new_token
# Update all subsequent line numbers.
next_token = new_tokens[-1].next
while next_token:
next_token.line_number += 1
next_token = next_token.next
InsertTokenAfter(blank_token, token)
# Update all subsequent ine numbers.
blank_token = blank_token.next
while blank_token:
blank_token.line_number += 1
blank_token = blank_token.next
def SplitToken(token, position):
@ -360,10 +275,6 @@ def SplitToken(token, position):
def Compare(token1, token2):
"""Compares two tokens and determines their relative order.
Args:
token1: The first token to compare.
token2: The second token to compare.
Returns:
A negative integer, zero, or a positive integer as the first token is
before, equal, or after the second in the token stream.

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,5 @@
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

View File

@ -20,7 +20,7 @@ except ImportError:
from distutils.core import setup
setup(name='closure_linter',
version='2.3.5',
version='2.2.6',
description='Closure Linter',
license='Apache',
author='The Closure Linter Authors',