Mirror of https://github.com/ivuorinen/tree-sitter-shellspec.git (synced 2026-01-26 03:34:03 +00:00)
chore: add eclint for editorconfig linting and fix violations
- Install eclint ^2.8.1 for editorconfig validation and fixing
- Add .eclintignore to exclude generated files and dependencies
- Add npm scripts: lint:editorconfig and lint:editorconfig:fix
- Fix indentation issues in CONTRIBUTING.md (3 spaces -> 2 spaces)
- Fix code alignment in scanner.c to match editorconfig rules
- Regenerate parser after scanner.c formatting changes
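For orientation, a minimal way to exercise the new scripts locally (assumes a checkout with dependencies installed; the script names come from the package.json hunk further down):

```bash
# Report EditorConfig violations (runs "eclint check ." per package.json).
npm run lint:editorconfig

# Rewrite files in place to satisfy .editorconfig (runs "eclint fix .").
npm run lint:editorconfig:fix
```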
.eclintignore (new file, 21 lines)
@@ -0,0 +1,21 @@
+# Dependencies
+node_modules/
+
+# Generated files
+src/parser.c
+src/grammar.json
+src/node-types.json
+src/tree_sitter/
+
+# Build artifacts
+build/
+dist/
+
+# Logs
+*.log
+megalinter-reports/
+
+# Lock files
+package-lock.json
+pnpm-lock.yaml
+yarn.lock
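As a side note, a rough way to preview which tracked files the patterns above cover, using only git and grep (a sketch; it treats directory entries as simple path prefixes and is only an approximation, independent of how the ignore file is actually consumed):

```bash
# Approximate listing of tracked files matched by the ignore patterns.
git ls-files | grep -E '^(node_modules/|src/parser\.c|src/grammar\.json|src/node-types\.json|src/tree_sitter/|build/|dist/|megalinter-reports/|package-lock\.json|pnpm-lock\.yaml|yarn\.lock)|\.log$'
```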
CONTRIBUTING.md (112 lines changed)
@@ -28,42 +28,42 @@ Thank you for your interest in contributing to tree-sitter-shellspec! This docum
 1. Fork the repository on GitHub
 2. Clone your fork locally:

-   ```bash
-   git clone https://github.com/YOUR_USERNAME/tree-sitter-shellspec.git
-   cd tree-sitter-shellspec
-   ```
+  ```bash
+  git clone https://github.com/YOUR_USERNAME/tree-sitter-shellspec.git
+  cd tree-sitter-shellspec
+  ```

 3. Add the upstream repository:

-   ```bash
-   git remote add upstream https://github.com/ivuorinen/tree-sitter-shellspec.git
-   ```
+  ```bash
+  git remote add upstream https://github.com/ivuorinen/tree-sitter-shellspec.git
+  ```

 ## Development Setup

 1. **Install dependencies:**

-   ```bash
-   npm install
-   ```
+  ```bash
+  npm install
+  ```

 2. **Generate the grammar:**

-   ```bash
-   npm run generate
-   ```
+  ```bash
+  npm run generate
+  ```

 3. **Run tests:**

-   ```bash
-   npm test
-   ```
+  ```bash
+  npm test
+  ```

 4. **Build the parser:**

-   ```bash
-   npm run build
-   ```
+  ```bash
+  npm run build
+  ```

 ### Development Workflow

@@ -209,33 +209,33 @@ tree-sitter test --debug

 1. **Create a feature branch:**

-   ```bash
-   git checkout -b feature/your-feature-name
-   ```
+  ```bash
+  git checkout -b feature/your-feature-name
+  ```

 2. **Make your changes** following the guidelines above

 3. **Commit with clear messages:**

-   ```bash
-   git commit -m "feat: add support for Data block modifiers
+  ```bash
+  git commit -m "feat: add support for Data block modifiers

-   - Add :raw and :expand modifier support
-   - Update test cases for new syntax
-   - Add documentation examples"
-   ```
+  - Add :raw and :expand modifier support
+  - Update test cases for new syntax
+  - Add documentation examples"
+  ```

 4. **Push to your fork:**

-   ```bash
-   git push origin feature/your-feature-name
-   ```
+  ```bash
+  git push origin feature/your-feature-name
+  ```

 5. **Create a Pull Request** with:
-   - Clear description of changes
-   - References to related issues
-   - Test results and coverage
-   - Breaking change notes (if any)
+  - Clear description of changes
+  - References to related issues
+  - Test results and coverage
+  - Breaking change notes (if any)

 ### Commit Message Guidelines

@@ -282,43 +282,43 @@ Use the [Grammar Issue template](.github/ISSUE_TEMPLATE/grammar_issue.md) for:
 ### High Priority

 1. **Enhanced Data block support**
-   - `:raw` and `:expand` modifiers
-   - Pipe filter syntax (`Data | command`)
-   - Multi-line `#|` syntax
+  - `:raw` and `:expand` modifiers
+  - Pipe filter syntax (`Data | command`)
+  - Multi-line `#|` syntax

 2. **Assertion parsing**
-   - When/The statement structures
-   - Matcher syntax parsing
-   - Subject/predicate analysis
+  - When/The statement structures
+  - Matcher syntax parsing
+  - Subject/predicate analysis

 3. **Performance optimization**
-   - Reduce parser conflicts
-   - Optimize grammar rules
-   - Improve parsing speed
+  - Reduce parser conflicts
+  - Optimize grammar rules
+  - Improve parsing speed

 ### Medium Priority

 1. **Editor integration**
-   - Neovim configuration examples
-   - VS Code extension support
-   - Emacs tree-sitter integration
+  - Neovim configuration examples
+  - VS Code extension support
+  - Emacs tree-sitter integration

 2. **Tooling improvements**
-   - Syntax highlighting themes
-   - Language server features
-   - Code formatting rules
+  - Syntax highlighting themes
+  - Language server features
+  - Code formatting rules

 3. **Documentation**
-   - Usage tutorials
-   - Grammar development guide
-   - Editor setup instructions
+  - Usage tutorials
+  - Grammar development guide
+  - Editor setup instructions

 ### Low Priority

 1. **Advanced features**
-   - ShellSpec custom matchers
-   - Configuration file parsing
-   - Metadata extraction
+  - ShellSpec custom matchers
+  - Configuration file parsing
+  - Metadata extraction

 ## Development Resources

package-lock.json (generated, 2926 lines changed; diff suppressed because it is too large)
package.json

@@ -16,6 +16,8 @@
     "lint": "npx mega-linter-runner",
     "lint:yaml": "yamllint .",
     "lint:markdown": "markdownlint . --config .markdownlint.json --ignore node_modules --fix",
+    "lint:editorconfig": "eclint check .",
+    "lint:editorconfig:fix": "eclint fix .",
     "format": "prettier --write .",
     "format:check": "prettier --check .",
     "precommit": "pre-commit run --all-files",
@@ -37,6 +39,7 @@
     "tree-sitter-bash": "^0.25.0"
   },
   "devDependencies": {
+    "eclint": "^2.8.1",
     "markdownlint-cli": "^0.46.0",
     "nodemon": "^3.0.1",
     "prettier": "^3.6.2",
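The pinned devDependency above would typically have been added with something like the following (illustrative only; the exact command is not recorded in the commit):

```bash
# Add eclint at the version range shown in the diff above.
npm install --save-dev 'eclint@^2.8.1'
```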
src/parser.c

@@ -1,4 +1,4 @@
-/* Automatically @generated by tree-sitter v0.25.9 */
+/* Automatically @generated by tree-sitter v0.25.10 */

 #include "tree_sitter/parser.h"

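The generator version bump above comes from regenerating the parser after the scanner edits. Per the CONTRIBUTING.md excerpt earlier in this diff, that amounts to the following (assuming the repository's generate script wraps tree-sitter generate):

```bash
# Regenerate src/parser.c and the src/tree_sitter/ headers, then re-run tests.
npm run generate
npm test
```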
src/scanner.c (alignment-only changes; each paired -/+ line below differs only in leading whitespace, which is not reproduced in this rendering)

@@ -116,7 +116,7 @@ static unsigned serialize(Scanner *scanner, char *buffer) {
 size += sizeof(uint32_t);
 if (heredoc->delimiter.size > 0) {
 memcpy(&buffer[size], heredoc->delimiter.contents,
-heredoc->delimiter.size);
+heredoc->delimiter.size);
 size += heredoc->delimiter.size;
 }
 }
@@ -159,7 +159,7 @@ static void deserialize(Scanner *scanner, const char *buffer, unsigned length) {

 if (heredoc->delimiter.size > 0) {
 memcpy(heredoc->delimiter.contents, &buffer[size],
-heredoc->delimiter.size);
+heredoc->delimiter.size);
 size += heredoc->delimiter.size;
 // Ensure NUL termination for safety
 if (heredoc->delimiter.contents[heredoc->delimiter.size - 1] != '\0') {
@@ -190,9 +190,9 @@ static bool advance_word(TSLexer *lexer, String *unquoted_word) {
 }

 while (lexer->lookahead &&
-!(quote ? lexer->lookahead == quote || lexer->lookahead == '\r' ||
-lexer->lookahead == '\n'
-: iswspace(lexer->lookahead))) {
+!(quote ? lexer->lookahead == quote || lexer->lookahead == '\r' ||
+lexer->lookahead == '\n'
+: iswspace(lexer->lookahead))) {
 if (lexer->lookahead == '\\') {
 advance(lexer);
 if (!lexer->lookahead) {
@@ -214,7 +214,7 @@ static bool advance_word(TSLexer *lexer, String *unquoted_word) {

 static inline bool scan_bare_dollar(TSLexer *lexer) {
 while (iswspace(lexer->lookahead) && lexer->lookahead != '\n' &&
-!lexer->eof(lexer)) {
+!lexer->eof(lexer)) {
 skip(lexer);
 }

@@ -223,7 +223,7 @@ static inline bool scan_bare_dollar(TSLexer *lexer) {
 lexer->result_symbol = BARE_DOLLAR;
 lexer->mark_end(lexer);
 return iswspace(lexer->lookahead) || lexer->eof(lexer) ||
-lexer->lookahead == '\"';
+lexer->lookahead == '\"';
 }

 return false;
@@ -253,8 +253,8 @@ static bool scan_heredoc_end_identifier(Heredoc *heredoc, TSLexer *lexer) {
 int32_t size = 0;
 if (heredoc->delimiter.size > 0) {
 while (lexer->lookahead != '\0' && lexer->lookahead != '\n' &&
-(int32_t)*array_get(&heredoc->delimiter, size) == lexer->lookahead &&
-heredoc->current_leading_word.size < heredoc->delimiter.size) {
+(int32_t)*array_get(&heredoc->delimiter, size) == lexer->lookahead &&
+heredoc->current_leading_word.size < heredoc->delimiter.size) {
 array_push(&heredoc->current_leading_word, lexer->lookahead);
 advance(lexer);
 size++;
@@ -262,14 +262,14 @@ static bool scan_heredoc_end_identifier(Heredoc *heredoc, TSLexer *lexer) {
 }
 array_push(&heredoc->current_leading_word, '\0');
 return heredoc->delimiter.size == 0
-? false
-: strcmp(heredoc->current_leading_word.contents,
+? false
+: strcmp(heredoc->current_leading_word.contents,
 heredoc->delimiter.contents) == 0;
 }

 static bool scan_heredoc_content(Scanner *scanner, TSLexer *lexer,
-enum TokenType middle_type,
-enum TokenType end_type) {
+enum TokenType middle_type,
+enum TokenType end_type) {
 bool did_advance = false;
 Heredoc *heredoc = array_back(&scanner->heredocs);

@@ -451,7 +451,7 @@ static bool scan(Scanner *scanner, TSLexer *lexer, const bool *valid_symbols) {
 advance(lexer);
 lexer->mark_end(lexer);
 while (lexer->lookahead == '#' || lexer->lookahead == '=' ||
-lexer->lookahead == '!') {
+lexer->lookahead == '!') {
 advance(lexer);
 }
 while (iswspace(lexer->lookahead)) {
@@ -473,7 +473,7 @@ static bool scan(Scanner *scanner, TSLexer *lexer, const bool *valid_symbols) {
 }

 if ((valid_symbols[HEREDOC_BODY_BEGINNING] ||
-valid_symbols[SIMPLE_HEREDOC_BODY]) &&
+valid_symbols[SIMPLE_HEREDOC_BODY]) &&
 scanner->heredocs.size > 0 && !array_back(&scanner->heredocs)->started &&
 !in_error_recovery(valid_symbols)) {
 return scan_heredoc_content(scanner, lexer, HEREDOC_BODY_BEGINNING,
@@ -580,12 +580,12 @@ static bool scan(Scanner *scanner, TSLexer *lexer, const bool *valid_symbols) {
 }

 if ((valid_symbols[VARIABLE_NAME] || valid_symbols[FILE_DESCRIPTOR] ||
-valid_symbols[HEREDOC_ARROW]) &&
+valid_symbols[HEREDOC_ARROW]) &&
 !valid_symbols[REGEX_NO_SLASH] && !in_error_recovery(valid_symbols)) {
 for (;;) {
 if ((lexer->lookahead == ' ' || lexer->lookahead == '\t' ||
-lexer->lookahead == '\r' ||
-(lexer->lookahead == '\n' && !valid_symbols[NEWLINE])) &&
+lexer->lookahead == '\r' ||
+(lexer->lookahead == '\n' && !valid_symbols[NEWLINE])) &&
 !valid_symbols[EXPANSION_WORD]) {
 skip(lexer);
 } else if (lexer->lookahead == '\\') {
@@ -616,8 +616,8 @@ static bool scan(Scanner *scanner, TSLexer *lexer, const bool *valid_symbols) {
 // no '*', '@', '?', '-', '$', '0', '_'
 if (!valid_symbols[EXPANSION_WORD] &&
 (lexer->lookahead == '*' || lexer->lookahead == '@' ||
-lexer->lookahead == '?' || lexer->lookahead == '-' ||
-lexer->lookahead == '0' || lexer->lookahead == '_')) {
+lexer->lookahead == '?' || lexer->lookahead == '-' ||
+lexer->lookahead == '0' || lexer->lookahead == '_')) {
 lexer->mark_end(lexer);
 advance(lexer);
 if (lexer->lookahead == '=' || lexer->lookahead == '[' ||
@@ -707,10 +707,10 @@ static bool scan(Scanner *scanner, TSLexer *lexer, const bool *valid_symbols) {
 }
 if (lexer->lookahead == '=' || lexer->lookahead == '[' ||
 (lexer->lookahead == ':' && !valid_symbols[CLOSING_BRACE] &&
-!valid_symbols
-[OPENING_PAREN]) || // TODO(amaanq): more cases for regular word
-// chars but not variable names for function
-// words, only handling : for now? #235
+!valid_symbols
+[OPENING_PAREN]) || // TODO(amaanq): more cases for regular word
+// chars but not variable names for function
+// words, only handling : for now? #235
 lexer->lookahead == '%' ||
 (lexer->lookahead == '#' && !is_number) || lexer->lookahead == '@' ||
 (lexer->lookahead == '-' && valid_symbols[CLOSING_BRACE])) {
@@ -737,7 +737,7 @@ static bool scan(Scanner *scanner, TSLexer *lexer, const bool *valid_symbols) {

 regex:
 if ((valid_symbols[REGEX] || valid_symbols[REGEX_NO_SLASH] ||
-valid_symbols[REGEX_NO_SPACE]) &&
+valid_symbols[REGEX_NO_SPACE]) &&
 !in_error_recovery(valid_symbols)) {
 if (valid_symbols[REGEX] || valid_symbols[REGEX_NO_SPACE]) {
 while (iswspace(lexer->lookahead)) {
@@ -747,7 +747,7 @@ regex:

 if ((lexer->lookahead != '"' && lexer->lookahead != '\'') ||
 ((lexer->lookahead == '$' || lexer->lookahead == '\'') &&
-valid_symbols[REGEX_NO_SLASH]) ||
+valid_symbols[REGEX_NO_SLASH]) ||
 (lexer->lookahead == '\'' && valid_symbols[REGEX_NO_SPACE])) {
 typedef struct {
 bool done;
@@ -905,8 +905,8 @@ regex:
 }

 lexer->result_symbol = valid_symbols[REGEX_NO_SLASH] ? REGEX_NO_SLASH
-: valid_symbols[REGEX_NO_SPACE] ? REGEX_NO_SPACE
-: REGEX;
+: valid_symbols[REGEX_NO_SPACE] ? REGEX_NO_SPACE
+: REGEX;
 if (valid_symbols[REGEX] && !state.advanced_once) {
 return false;
 }
@@ -1035,7 +1035,7 @@ extglob_pattern:
 } State;

 State state = {false, was_non_alpha, scanner->last_glob_paren_depth, 0,
-0};
+0};
 while (!state.done) {
 switch (lexer->lookahead) {
 case '\0':
@@ -1261,7 +1261,7 @@ void *tree_sitter_shellspec_external_scanner_create() {
 }

 bool tree_sitter_shellspec_external_scanner_scan(void *payload, TSLexer *lexer,
-const bool *valid_symbols) {
+const bool *valid_symbols) {
 Scanner *scanner = (Scanner *)payload;
 return scan(scanner, lexer, valid_symbols);
 }
src/tree_sitter/parser.h (regenerated alongside src/parser.c; change markers for this hunk are not preserved)

@@ -152,7 +152,6 @@ struct TSLanguage {
};

static inline bool set_contains(const TSCharacterRange *ranges, uint32_t len, int32_t lookahead) {
if (len == 0) return false;
uint32_t index = 0;
uint32_t size = len - index;
while (size > 1) {