title
stringlengths 2
169
| diff
stringlengths 235
19.5k
| body
stringlengths 0
30.5k
| url
stringlengths 48
84
| created_at
stringlengths 20
20
| closed_at
stringlengths 20
20
| merged_at
stringlengths 20
20
| updated_at
stringlengths 20
20
| diff_len
float64 101
3.99k
| repo_name
stringclasses 83
values | __index_level_0__
int64 15
52.7k
|
---|---|---|---|---|---|---|---|---|---|---|
Arch Linux installation guild | diff --git a/docs/source/installation/linux.rst b/docs/source/installation/linux.rst
index ddf96bd777..ff11bc3cdc 100644
--- a/docs/source/installation/linux.rst
+++ b/docs/source/installation/linux.rst
@@ -39,3 +39,26 @@ the ``activate`` binary by doing ``source bin/activate``, to exit use the ``deac
texlive texlive-latex-extra texlive-fonts-extra
texlive-latex-recommended texlive-science texlive-fonts-extra tipa
+
+Arch Linux
+----------
+Install system libraries::
+
+ # pacman -S cairo ffmpeg opencv sox
+
+Install Latex distribution::
+
+ # pacman -S texlive-most
+
+OR install python-manimlib_:sup:`AUR` package::
+
+ $ git clone https://aur.archlinux.org/python-manimlib.git
+ $ cd python-manimlib
+ $ makepkg -si
+
+You can use AUR helpers such as yay_:sup:`AUR`::
+
+ $ yay -S python-manimlib
+
+.. _python-manimlib: https://aur.archlinux.org/packages/python-manimlib/
+.. _yay: https://aur.archlinux.org/packages/yay/
| Related #629 | https://api.github.com/repos/3b1b/manim/pulls/632 | 2019-07-12T13:20:26Z | 2019-07-14T05:28:00Z | 2019-07-14T05:28:00Z | 2019-07-14T05:28:00Z | 283 | 3b1b/manim | 18,230 |
Add Picnic re-auth flow | diff --git a/homeassistant/components/picnic/config_flow.py b/homeassistant/components/picnic/config_flow.py
index 09a1d5242833..c2d48ca94152 100644
--- a/homeassistant/components/picnic/config_flow.py
+++ b/homeassistant/components/picnic/config_flow.py
@@ -9,6 +9,7 @@
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
+from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_PASSWORD, CONF_USERNAME
from .const import CONF_COUNTRY_CODE, COUNTRY_CODES, DOMAIN
@@ -71,8 +72,12 @@ class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
VERSION = 1
+ async def async_step_reauth(self, _):
+ """Perform the re-auth step upon an API authentication error."""
+ return await self.async_step_user()
+
async def async_step_user(self, user_input=None):
- """Handle the initial step."""
+ """Handle the authentication step, this is the generic step for both `step_user` and `step_reauth`."""
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA
@@ -90,17 +95,25 @@ async def async_step_user(self, user_input=None):
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
- # Set the unique id and abort if it already exists
- await self.async_set_unique_id(info["unique_id"])
- self._abort_if_unique_id_configured()
-
- return self.async_create_entry(
- title=info["title"],
- data={
- CONF_ACCESS_TOKEN: auth_token,
- CONF_COUNTRY_CODE: user_input[CONF_COUNTRY_CODE],
- },
- )
+ data = {
+ CONF_ACCESS_TOKEN: auth_token,
+ CONF_COUNTRY_CODE: user_input[CONF_COUNTRY_CODE],
+ }
+ existing_entry = await self.async_set_unique_id(info["unique_id"])
+
+ # Abort if we're adding a new config and the unique id is already in use, else create the entry
+ if self.source != SOURCE_REAUTH:
+ self._abort_if_unique_id_configured()
+ return self.async_create_entry(title=info["title"], data=data)
+
+ # In case of re-auth, only continue if an exiting account exists with the same unique id
+ if existing_entry:
+ self.hass.config_entries.async_update_entry(existing_entry, data=data)
+ await self.hass.config_entries.async_reload(existing_entry.entry_id)
+ return self.async_abort(reason="reauth_successful")
+
+ # Set the error because the account is different
+ errors["base"] = "different_account"
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
diff --git a/homeassistant/components/picnic/strings.json b/homeassistant/components/picnic/strings.json
index 7fbd5e9bef67..9eb51b2fd2a4 100644
--- a/homeassistant/components/picnic/strings.json
+++ b/homeassistant/components/picnic/strings.json
@@ -12,10 +12,12 @@
"error": {
"cannot_connect": "[%key:common::config_flow::error::cannot_connect%]",
"invalid_auth": "[%key:common::config_flow::error::invalid_auth%]",
- "unknown": "[%key:common::config_flow::error::unknown%]"
+ "unknown": "[%key:common::config_flow::error::unknown%]",
+ "different_account": "Account should be the same as used for setting up the integration"
},
"abort": {
- "already_configured": "[%key:common::config_flow::abort::already_configured_device%]"
+ "already_configured": "[%key:common::config_flow::abort::already_configured_device%]",
+ "reauth_successful": "[%key:common::config_flow::abort::reauth_successful%]"
}
}
}
diff --git a/homeassistant/components/picnic/translations/en.json b/homeassistant/components/picnic/translations/en.json
index c7097df12a96..13b62c78757a 100644
--- a/homeassistant/components/picnic/translations/en.json
+++ b/homeassistant/components/picnic/translations/en.json
@@ -1,10 +1,12 @@
{
"config": {
"abort": {
- "already_configured": "Device is already configured"
+ "already_configured": "Device is already configured",
+ "reauth_successful": "Re-authentication was successful"
},
"error": {
"cannot_connect": "Failed to connect",
+ "different_account": "Account should be the same as used for setting up the integration",
"invalid_auth": "Invalid authentication",
"unknown": "Unexpected error"
},
@@ -17,6 +19,5 @@
}
}
}
- },
- "title": "Picnic"
+ }
}
\ No newline at end of file
diff --git a/tests/components/picnic/test_config_flow.py b/tests/components/picnic/test_config_flow.py
index b6bcc17a03d2..3ea54cee5937 100644
--- a/tests/components/picnic/test_config_flow.py
+++ b/tests/components/picnic/test_config_flow.py
@@ -1,23 +1,20 @@
"""Test the Picnic config flow."""
from unittest.mock import patch
+import pytest
from python_picnic_api.session import PicnicAuthError
import requests
-from homeassistant import config_entries
+from homeassistant import config_entries, data_entry_flow
from homeassistant.components.picnic.const import CONF_COUNTRY_CODE, DOMAIN
from homeassistant.const import CONF_ACCESS_TOKEN
+from tests.common import MockConfigEntry
-async def test_form(hass):
- """Test we get the form."""
-
- result = await hass.config_entries.flow.async_init(
- DOMAIN, context={"source": config_entries.SOURCE_USER}
- )
- assert result["type"] == "form"
- assert result["errors"] is None
[email protected]
+def picnic_api():
+ """Create PicnicAPI mock with set response data."""
auth_token = "af3wh738j3fa28l9fa23lhiufahu7l"
auth_data = {
"user_id": "f29-2a6-o32n",
@@ -29,13 +26,27 @@ async def test_form(hass):
}
with patch(
"homeassistant.components.picnic.config_flow.PicnicAPI",
- ) as mock_picnic, patch(
+ ) as picnic_mock:
+ picnic_mock().session.auth_token = auth_token
+ picnic_mock().get_user.return_value = auth_data
+
+ yield picnic_mock
+
+
+async def test_form(hass, picnic_api):
+ """Test we get the form and a config entry is created."""
+
+ result = await hass.config_entries.flow.async_init(
+ DOMAIN, context={"source": config_entries.SOURCE_USER}
+ )
+ assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
+ assert result["step_id"] == "user"
+ assert result["errors"] is None
+
+ with patch(
"homeassistant.components.picnic.async_setup_entry",
return_value=True,
) as mock_setup_entry:
- mock_picnic().session.auth_token = auth_token
- mock_picnic().get_user.return_value = auth_data
-
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
@@ -49,14 +60,14 @@ async def test_form(hass):
assert result2["type"] == "create_entry"
assert result2["title"] == "Teststreet 123b"
assert result2["data"] == {
- CONF_ACCESS_TOKEN: auth_token,
+ CONF_ACCESS_TOKEN: picnic_api().session.auth_token,
CONF_COUNTRY_CODE: "NL",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
- """Test we handle invalid auth."""
+ """Test we handle invalid authentication."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
@@ -74,12 +85,12 @@ async def test_form_invalid_auth(hass):
},
)
- assert result2["type"] == "form"
+ assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
- """Test we handle cannot connect error."""
+ """Test we handle connection errors."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
@@ -97,7 +108,7 @@ async def test_form_cannot_connect(hass):
},
)
- assert result2["type"] == "form"
+ assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
@@ -120,5 +131,150 @@ async def test_form_exception(hass):
},
)
- assert result2["type"] == "form"
+ assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
+
+
+async def test_form_already_configured(hass, picnic_api):
+ """Test that an entry with unique id can only be added once."""
+ # Create a mocked config entry and make sure to use the same user_id as set for the picnic_api mock response.
+ MockConfigEntry(
+ domain=DOMAIN,
+ unique_id=picnic_api().get_user()["user_id"],
+ data={CONF_ACCESS_TOKEN: "a3p98fsen.a39p3fap", CONF_COUNTRY_CODE: "NL"},
+ ).add_to_hass(hass)
+
+ result_init = await hass.config_entries.flow.async_init(
+ DOMAIN, context={"source": config_entries.SOURCE_USER}
+ )
+
+ result_configure = await hass.config_entries.flow.async_configure(
+ result_init["flow_id"],
+ {
+ "username": "test-username",
+ "password": "test-password",
+ "country_code": "NL",
+ },
+ )
+ await hass.async_block_till_done()
+
+ assert result_configure["type"] == data_entry_flow.RESULT_TYPE_ABORT
+ assert result_configure["reason"] == "already_configured"
+
+
+async def test_step_reauth(hass, picnic_api):
+ """Test the re-auth flow."""
+ # Create a mocked config entry
+ conf = {CONF_ACCESS_TOKEN: "a3p98fsen.a39p3fap", CONF_COUNTRY_CODE: "NL"}
+
+ MockConfigEntry(
+ domain=DOMAIN,
+ unique_id=picnic_api().get_user()["user_id"],
+ data=conf,
+ ).add_to_hass(hass)
+
+ # Init a re-auth flow
+ result_init = await hass.config_entries.flow.async_init(
+ DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=conf
+ )
+ assert result_init["type"] == data_entry_flow.RESULT_TYPE_FORM
+ assert result_init["step_id"] == "user"
+
+ with patch(
+ "homeassistant.components.picnic.async_setup_entry",
+ return_value=True,
+ ):
+ result_configure = await hass.config_entries.flow.async_configure(
+ result_init["flow_id"],
+ {
+ "username": "test-username",
+ "password": "test-password",
+ "country_code": "NL",
+ },
+ )
+ await hass.async_block_till_done()
+
+ # Check that the returned flow has type abort because of successful re-authentication
+ assert result_configure["type"] == data_entry_flow.RESULT_TYPE_ABORT
+ assert result_configure["reason"] == "reauth_successful"
+
+ assert len(hass.config_entries.async_entries()) == 1
+
+
+async def test_step_reauth_failed(hass):
+ """Test the re-auth flow when authentication fails."""
+ # Create a mocked config entry
+ user_id = "f29-2a6-o32n"
+ conf = {CONF_ACCESS_TOKEN: "a3p98fsen.a39p3fap", CONF_COUNTRY_CODE: "NL"}
+
+ MockConfigEntry(
+ domain=DOMAIN,
+ unique_id=user_id,
+ data=conf,
+ ).add_to_hass(hass)
+
+ # Init a re-auth flow
+ result_init = await hass.config_entries.flow.async_init(
+ DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=conf
+ )
+ assert result_init["type"] == data_entry_flow.RESULT_TYPE_FORM
+ assert result_init["step_id"] == "user"
+
+ with patch(
+ "homeassistant.components.picnic.config_flow.PicnicHub.authenticate",
+ side_effect=PicnicAuthError,
+ ):
+ result_configure = await hass.config_entries.flow.async_configure(
+ result_init["flow_id"],
+ {
+ "username": "test-username",
+ "password": "test-password",
+ "country_code": "NL",
+ },
+ )
+ await hass.async_block_till_done()
+
+ # Check that the returned flow has type form with error set
+ assert result_configure["type"] == "form"
+ assert result_configure["errors"] == {"base": "invalid_auth"}
+
+ assert len(hass.config_entries.async_entries()) == 1
+
+
+async def test_step_reauth_different_account(hass, picnic_api):
+ """Test the re-auth flow when authentication is done with a different account."""
+ # Create a mocked config entry, unique_id should be different that the user id in the api response
+ conf = {CONF_ACCESS_TOKEN: "a3p98fsen.a39p3fap", CONF_COUNTRY_CODE: "NL"}
+
+ MockConfigEntry(
+ domain=DOMAIN,
+ unique_id="3fpawh-ues-af3ho",
+ data=conf,
+ ).add_to_hass(hass)
+
+ # Init a re-auth flow
+ result_init = await hass.config_entries.flow.async_init(
+ DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=conf
+ )
+ assert result_init["type"] == data_entry_flow.RESULT_TYPE_FORM
+ assert result_init["step_id"] == "user"
+
+ with patch(
+ "homeassistant.components.picnic.async_setup_entry",
+ return_value=True,
+ ):
+ result_configure = await hass.config_entries.flow.async_configure(
+ result_init["flow_id"],
+ {
+ "username": "test-username",
+ "password": "test-password",
+ "country_code": "NL",
+ },
+ )
+ await hass.async_block_till_done()
+
+ # Check that the returned flow has type form with error set
+ assert result_configure["type"] == "form"
+ assert result_configure["errors"] == {"base": "different_account"}
+
+ assert len(hass.config_entries.async_entries()) == 1
| <!--
You are amazing! Thanks for contributing to our project!
Please, DO NOT DELETE ANY TEXT from this template! (unless instructed).
-->
## Breaking change
<!--
If your PR contains a breaking change for existing users, it is important
to tell them what breaks, how to make it work again and why we did this.
This piece of text is published with the release notes, so it helps if you
write it towards our users, not us.
Note: Remove this section if this PR is NOT a breaking change.
-->
## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
Add the re-authentication flow for the Picnic integration.
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [x] New feature (which adds functionality to an existing integration)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [ ] Code quality improvements to existing code or addition of tests
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [x] The code change is tested and works locally.
- [x] Local tests pass. **Your PR cannot be merged unless tests pass**
- [x] There is no commented out code in this PR.
- [x] I have followed the [development checklist][dev-checklist]
- [x] The code has been formatted using Black (`black --fast homeassistant tests`)
- [x] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.
The integration reached or maintains the following [Integration Quality Scale][quality-scale]:
<!--
The Integration Quality Scale scores an integration on the code quality
and user experience. Each level of the quality scale consists of a list
of requirements. We highly recommend getting your integration scored!
-->
- [x] No score or internal
- [ ] 🥈 Silver
- [ ] 🥇 Gold
- [ ] 🏆 Platinum
<!--
This project is very active and we have a high turnover of pull requests.
Unfortunately, the number of incoming pull requests is higher than what our
reviewers can review and merge so there is a long backlog of pull requests
waiting for review. You can help here!
By reviewing another pull request, you will help raise the code quality of
that pull request and the final review will be faster. This way the general
pace of pull request reviews will go up and your wait time will go down.
When picking a pull request to review, try to choose one that hasn't yet
been reviewed.
Thanks for helping out!
-->
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
| https://api.github.com/repos/home-assistant/core/pulls/62938 | 2021-12-28T14:43:30Z | 2022-02-12T16:15:36Z | 2022-02-12T16:15:36Z | 2022-02-13T17:01:49Z | 3,400 | home-assistant/core | 39,435 |
为表格标注添加BBOX排序功能和序号显示 | diff --git a/PPOCRLabel/PPOCRLabel.py b/PPOCRLabel/PPOCRLabel.py
index ce3d66f07f..440c2d8c3e 100644
--- a/PPOCRLabel/PPOCRLabel.py
+++ b/PPOCRLabel/PPOCRLabel.py
@@ -28,7 +28,7 @@
from PyQt5.QtGui import QImage, QCursor, QPixmap, QImageReader
from PyQt5.QtWidgets import QMainWindow, QListWidget, QVBoxLayout, QToolButton, QHBoxLayout, QDockWidget, QWidget, \
QSlider, QGraphicsOpacityEffect, QMessageBox, QListView, QScrollArea, QWidgetAction, QApplication, QLabel, QGridLayout, \
- QFileDialog, QListWidgetItem, QComboBox, QDialog
+ QFileDialog, QListWidgetItem, QComboBox, QDialog, QAbstractItemView
__dir__ = os.path.dirname(os.path.abspath(__file__))
@@ -242,6 +242,20 @@ def __init__(self,
self.labelListDock.setFeatures(QDockWidget.NoDockWidgetFeatures)
listLayout.addWidget(self.labelListDock)
+ # enable labelList drag_drop to adjust bbox order
+ # 设置选择模式为单选
+ self.labelList.setSelectionMode(QAbstractItemView.SingleSelection)
+ # 启用拖拽
+ self.labelList.setDragEnabled(True)
+ # 设置接受拖放
+ self.labelList.viewport().setAcceptDrops(True)
+ # 设置显示将要被放置的位置
+ self.labelList.setDropIndicatorShown(True)
+ # 设置拖放模式为移动项目,如果不设置,默认为复制项目
+ self.labelList.setDragDropMode(QAbstractItemView.InternalMove)
+ # 触发放置
+ self.labelList.model().rowsMoved.connect(self.drag_drop_happened)
+
# ================== Detection Box ==================
self.BoxList = QListWidget()
@@ -589,15 +603,23 @@ def __init__(self,
self.displayLabelOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
self.displayLabelOption.triggered.connect(self.togglePaintLabelsOption)
+ # Add option to enable/disable box index being displayed at the top of bounding boxes
+ self.displayIndexOption = QAction(getStr('displayIndex'), self)
+ self.displayIndexOption.setCheckable(True)
+ self.displayIndexOption.setChecked(settings.get(SETTING_PAINT_INDEX, False))
+ self.displayIndexOption.triggered.connect(self.togglePaintIndexOption)
+
self.labelDialogOption = QAction(getStr('labelDialogOption'), self)
self.labelDialogOption.setShortcut("Ctrl+Shift+L")
self.labelDialogOption.setCheckable(True)
self.labelDialogOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
+ self.displayIndexOption.setChecked(settings.get(SETTING_PAINT_INDEX, False))
self.labelDialogOption.triggered.connect(self.speedChoose)
self.autoSaveOption = QAction(getStr('autoSaveMode'), self)
self.autoSaveOption.setCheckable(True)
self.autoSaveOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
+ self.displayIndexOption.setChecked(settings.get(SETTING_PAINT_INDEX, False))
self.autoSaveOption.triggered.connect(self.autoSaveFunc)
addActions(self.menus.file,
@@ -606,7 +628,7 @@ def __init__(self,
addActions(self.menus.help, (showKeys, showSteps, showInfo))
addActions(self.menus.view, (
- self.displayLabelOption, self.labelDialogOption,
+ self.displayLabelOption, self.displayIndexOption, self.labelDialogOption,
None,
hideAll, showAll, None,
zoomIn, zoomOut, zoomOrg, None,
@@ -964,9 +986,10 @@ def updateBoxlist(self):
else:
self.canvas.selectedShapes_hShape = self.canvas.selectedShapes
for shape in self.canvas.selectedShapes_hShape:
- item = self.shapesToItemsbox[shape] # listitem
- text = [(int(p.x()), int(p.y())) for p in shape.points]
- item.setText(str(text))
+ if shape in self.shapesToItemsbox.keys():
+ item = self.shapesToItemsbox[shape] # listitem
+ text = [(int(p.x()), int(p.y())) for p in shape.points]
+ item.setText(str(text))
self.actions.undo.setEnabled(True)
self.setDirty()
@@ -1040,6 +1063,8 @@ def shapeSelectionChanged(self, selected_shapes):
def addLabel(self, shape):
shape.paintLabel = self.displayLabelOption.isChecked()
+ shape.paintIdx = self.displayIndexOption.isChecked()
+
item = HashableQListWidgetItem(shape.label)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(Qt.Unchecked) if shape.difficult else item.setCheckState(Qt.Checked)
@@ -1083,6 +1108,7 @@ def remLabels(self, shapes):
def loadLabels(self, shapes):
s = []
+ shape_index = 0
for label, points, line_color, key_cls, difficult in shapes:
shape = Shape(label=label, line_color=line_color, key_cls=key_cls)
for x, y in points:
@@ -1094,6 +1120,8 @@ def loadLabels(self, shapes):
shape.addPoint(QPointF(x, y))
shape.difficult = difficult
+ shape.idx = shape_index
+ shape_index += 1
# shape.locked = False
shape.close()
s.append(shape)
@@ -1209,18 +1237,54 @@ def boxSelectionChanged(self):
self.canvas.deSelectShape()
def labelItemChanged(self, item):
- shape = self.itemsToShapes[item]
- label = item.text()
- if label != shape.label:
- shape.label = item.text()
- # shape.line_color = generateColorByText(shape.label)
- self.setDirty()
- elif not ((item.checkState() == Qt.Unchecked) ^ (not shape.difficult)):
- shape.difficult = True if item.checkState() == Qt.Unchecked else False
- self.setDirty()
- else: # User probably changed item visibility
- self.canvas.setShapeVisible(shape, True) # item.checkState() == Qt.Checked
- # self.actions.save.setEnabled(True)
+ # avoid accidentally triggering the itemChanged siganl with unhashable item
+ # Unknown trigger condition
+ if type(item) == HashableQListWidgetItem:
+ shape = self.itemsToShapes[item]
+ label = item.text()
+ if label != shape.label:
+ shape.label = item.text()
+ # shape.line_color = generateColorByText(shape.label)
+ self.setDirty()
+ elif not ((item.checkState() == Qt.Unchecked) ^ (not shape.difficult)):
+ shape.difficult = True if item.checkState() == Qt.Unchecked else False
+ self.setDirty()
+ else: # User probably changed item visibility
+ self.canvas.setShapeVisible(shape, True) # item.checkState() == Qt.Checked
+ # self.actions.save.setEnabled(True)
+ else:
+ print('enter labelItemChanged slot with unhashable item: ', item, item.text())
+
+ def drag_drop_happened(self):
+ '''
+ label list drag drop signal slot
+ '''
+ # print('___________________drag_drop_happened_______________')
+ # should only select single item
+ for item in self.labelList.selectedItems():
+ newIndex = self.labelList.indexFromItem(item).row()
+
+ # only support drag_drop one item
+ assert len(self.canvas.selectedShapes) > 0
+ for shape in self.canvas.selectedShapes:
+ selectedShapeIndex = shape.idx
+
+ if newIndex == selectedShapeIndex:
+ return
+
+ # move corresponding item in shape list
+ shape = self.canvas.shapes.pop(selectedShapeIndex)
+ self.canvas.shapes.insert(newIndex, shape)
+
+ # update bbox index
+ self.canvas.updateShapeIndex()
+
+ # boxList update simultaneously
+ item = self.BoxList.takeItem(selectedShapeIndex)
+ self.BoxList.insertItem(newIndex, item)
+
+ # changes happen
+ self.setDirty()
# Callback functions:
def newShape(self, value=True):
@@ -1560,6 +1624,7 @@ def closeEvent(self, event):
settings[SETTING_LAST_OPEN_DIR] = ''
settings[SETTING_PAINT_LABEL] = self.displayLabelOption.isChecked()
+ settings[SETTING_PAINT_INDEX] = self.displayIndexOption.isChecked()
settings[SETTING_DRAW_SQUARE] = self.drawSquaresOption.isChecked()
settings.save()
try:
@@ -1946,8 +2011,16 @@ def loadPredefinedClasses(self, predefClassesFile):
self.labelHist.append(line)
def togglePaintLabelsOption(self):
+ self.displayIndexOption.setChecked(False)
+ for shape in self.canvas.shapes:
+ shape.paintLabel = self.displayLabelOption.isChecked()
+ shape.paintIdx = self.displayIndexOption.isChecked()
+
+ def togglePaintIndexOption(self):
+ self.displayLabelOption.setChecked(False)
for shape in self.canvas.shapes:
shape.paintLabel = self.displayLabelOption.isChecked()
+ shape.paintIdx = self.displayIndexOption.isChecked()
def toogleDrawSquare(self):
self.canvas.setDrawingShapeToSquare(self.drawSquaresOption.isChecked())
@@ -2187,6 +2260,7 @@ def TableRecognition(self):
shapes = []
result_len = len(region['res']['boxes'])
+ order_index = 0
for i in range(result_len):
bbox = np.array(region['res']['boxes'][i])
rec_text = region['res']['rec_res'][i][0]
@@ -2205,6 +2279,8 @@ def TableRecognition(self):
x, y, snapped = self.canvas.snapPointToCanvas(x, y)
shape.addPoint(QPointF(x, y))
shape.difficult = False
+ shape.idx = order_index
+ order_index += 1
# shape.locked = False
shape.close()
self.addLabel(shape)
diff --git a/PPOCRLabel/libs/canvas.py b/PPOCRLabel/libs/canvas.py
index e6cddf13ed..780ca71af5 100644
--- a/PPOCRLabel/libs/canvas.py
+++ b/PPOCRLabel/libs/canvas.py
@@ -314,21 +314,23 @@ def mouseReleaseEvent(self, ev):
QApplication.restoreOverrideCursor() # ?
if self.movingShape and self.hShape:
- index = self.shapes.index(self.hShape)
- if (
- self.shapesBackups[-1][index].points
- != self.shapes[index].points
- ):
- self.storeShapes()
- self.shapeMoved.emit() # connect to updateBoxlist in PPOCRLabel.py
+ if self.hShape in self.shapes:
+ index = self.shapes.index(self.hShape)
+ if (
+ self.shapesBackups[-1][index].points
+ != self.shapes[index].points
+ ):
+ self.storeShapes()
+ self.shapeMoved.emit() # connect to updateBoxlist in PPOCRLabel.py
- self.movingShape = False
+ self.movingShape = False
def endMove(self, copy=False):
assert self.selectedShapes and self.selectedShapesCopy
assert len(self.selectedShapesCopy) == len(self.selectedShapes)
if copy:
for i, shape in enumerate(self.selectedShapesCopy):
+ shape.idx = len(self.shapes) # add current box index
self.shapes.append(shape)
self.selectedShapes[i].selected = False
self.selectedShapes[i] = shape
@@ -524,6 +526,9 @@ def deleteSelected(self):
self.storeShapes()
self.selectedShapes = []
self.update()
+
+ self.updateShapeIndex()
+
return deleted_shapes
def storeShapes(self):
@@ -651,7 +656,8 @@ def finalise(self):
return
self.current.close()
- self.shapes.append(self.current)
+ self.current.idx = len(self.shapes) # add current box index
+ self.shapes.append(self.current)
self.current = None
self.setHiding(False)
self.newShape.emit()
@@ -842,6 +848,7 @@ def loadShapes(self, shapes, replace=True):
self.hVertex = None
# self.hEdge = None
self.storeShapes()
+ self.updateShapeIndex()
self.repaint()
def setShapeVisible(self, shape, value):
@@ -883,10 +890,16 @@ def restoreShape(self):
self.selectedShapes = []
for shape in self.shapes:
shape.selected = False
+ self.updateShapeIndex()
self.repaint()
-
+
@property
def isShapeRestorable(self):
if len(self.shapesBackups) < 2:
return False
- return True
\ No newline at end of file
+ return True
+
+ def updateShapeIndex(self):
+ for i in range(len(self.shapes)):
+ self.shapes[i].idx = i
+ self.update()
\ No newline at end of file
diff --git a/PPOCRLabel/libs/constants.py b/PPOCRLabel/libs/constants.py
index 58c8222ec5..f075f4a539 100644
--- a/PPOCRLabel/libs/constants.py
+++ b/PPOCRLabel/libs/constants.py
@@ -21,6 +21,7 @@
SETTING_WIN_STATE = 'window/state'
SETTING_SAVE_DIR = 'savedir'
SETTING_PAINT_LABEL = 'paintlabel'
+SETTING_PAINT_INDEX = 'paintindex'
SETTING_LAST_OPEN_DIR = 'lastOpenDir'
SETTING_AUTO_SAVE = 'autosave'
SETTING_SINGLE_CLASS = 'singleclass'
diff --git a/PPOCRLabel/libs/editinlist.py b/PPOCRLabel/libs/editinlist.py
index 79d2d3aa37..4bcc11ec47 100644
--- a/PPOCRLabel/libs/editinlist.py
+++ b/PPOCRLabel/libs/editinlist.py
@@ -26,4 +26,4 @@ def mouseDoubleClickEvent(self, event):
def leaveEvent(self, event):
# close edit
for i in range(self.count()):
- self.closePersistentEditor(self.item(i))
+ self.closePersistentEditor(self.item(i))
\ No newline at end of file
diff --git a/PPOCRLabel/libs/shape.py b/PPOCRLabel/libs/shape.py
index 97e2eb7238..180456a10c 100644
--- a/PPOCRLabel/libs/shape.py
+++ b/PPOCRLabel/libs/shape.py
@@ -46,15 +46,16 @@ class Shape(object):
point_size = 8
scale = 1.0
- def __init__(self, label=None, line_color=None, difficult=False, key_cls="None", paintLabel=False):
+ def __init__(self, label=None, line_color=None, difficult=False, key_cls="None", paintLabel=False, paintIdx=False):
self.label = label
- self.idx = 0
+ self.idx = None # bbox order, only for table annotation
self.points = []
self.fill = False
self.selected = False
self.difficult = difficult
self.key_cls = key_cls
self.paintLabel = paintLabel
+ self.paintIdx = paintIdx
self.locked = False
self.direction = 0
self.center = None
@@ -164,6 +165,25 @@ def paint(self, painter):
min_y += MIN_Y_LABEL
painter.drawText(min_x, min_y, self.label)
+ # Draw number at the top-right
+ if self.paintIdx:
+ min_x = sys.maxsize
+ min_y = sys.maxsize
+ for point in self.points:
+ min_x = min(min_x, point.x())
+ min_y = min(min_y, point.y())
+ if min_x != sys.maxsize and min_y != sys.maxsize:
+ font = QFont()
+ font.setPointSize(8)
+ font.setBold(True)
+ painter.setFont(font)
+ text = ''
+ if self.idx != None:
+ text = str(self.idx)
+ if min_y < MIN_Y_LABEL:
+ min_y += MIN_Y_LABEL
+ painter.drawText(min_x, min_y, text)
+
if self.fill:
color = self.select_fill_color if self.selected else self.fill_color
painter.fillPath(line_path, color)
diff --git a/PPOCRLabel/resources/strings/strings-en.properties b/PPOCRLabel/resources/strings/strings-en.properties
index 0b112c4646..1b628016c0 100644
--- a/PPOCRLabel/resources/strings/strings-en.properties
+++ b/PPOCRLabel/resources/strings/strings-en.properties
@@ -61,6 +61,7 @@ labels=Labels
autoSaveMode=Auto Save mode
singleClsMode=Single Class Mode
displayLabel=Display Labels
+displayIndex=Display box index
fileList=File List
files=Files
advancedMode=Advanced Mode
diff --git a/PPOCRLabel/resources/strings/strings-zh-CN.properties b/PPOCRLabel/resources/strings/strings-zh-CN.properties
index 184247e85b..0758729a8c 100644
--- a/PPOCRLabel/resources/strings/strings-zh-CN.properties
+++ b/PPOCRLabel/resources/strings/strings-zh-CN.properties
@@ -61,6 +61,7 @@ labels=标签
autoSaveMode=自动保存模式
singleClsMode=单一类别模式
displayLabel=显示类别
+displayIndex=显示box序号
fileList=文件列表
files=文件
advancedMode=专家模式
| 1、为表格标注添加BBOX排序功能和序号显示。
2、canvas类中成员hShape在shape被删除时没有被对应删除,而成员shapesToItemsbox中的shape的key已被删除,导致在遍历shapesToItemsbox会出现找不到key的情况。临时解决办法:在访问该key前检查是否存在。
3、已知问题:labelList的itemChanged信号不明触发输入了一个unhashable参数(对应的槽boxItemChanged接受一个HashableQListWidgetItem的对象)。临时解决办法:检查输入参数的类型。 | https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/6829 | 2022-07-08T07:01:23Z | 2022-07-14T08:30:15Z | 2022-07-14T08:30:15Z | 2022-07-14T08:30:15Z | 3,947 | PaddlePaddle/PaddleOCR | 42,762 |
Add mozilla observatory | diff --git a/README.md b/README.md
index 1d86b5da2f..1c897a39d3 100644
--- a/README.md
+++ b/README.md
@@ -793,6 +793,8 @@ API | Description | Auth | HTTPS | CORS |
| [HaveIBeenPwned](https://haveibeenpwned.com/API/v3) | Passwords which have previously been exposed in data breaches | `apiKey` | Yes | Unknown |
| [Intelligence X](https://github.com/IntelligenceX/SDK/blob/master/Intelligence%20X%20API.pdf) | Perform OSINT via Intelligence X | `apiKey` | Yes | Unknown |
| [LoginRadius](https://www.loginradius.com/docs/) | Managed User Authentication Service | `apiKey` | Yes | Yes |
+| [Mozilla http scanner](https://github.com/mozilla/http-observatory/blob/master/httpobs/docs/api.md) | Mozilla observatory http scanner | No | Yes | Unknown |
+| [Mozilla tls scanner](https://github.com/mozilla/tls-observatory#api-endpoints) | Mozilla observatory tls scanner | No | Yes | Unknown |
| [National Vulnerability Database](https://nvd.nist.gov/vuln/Data-Feeds/JSON-feed-changelog) | U.S. National Vulnerability Database | No | Yes | Unknown |
| [PhishStats](https://phishstats.info/) | Phishing database | No | Yes | Unknown |
| [Pulsedive](https://pulsedive.com/api/) | Scan, search and collect threat intelligence data in real-time | `apiKey` | Yes | Unknown |
| Thank you for taking the time to work on a Pull Request for this project!
To ensure your PR is dealt with swiftly please check the following:
- [x] Your submissions are formatted according to the guidelines in the [contributing guide](CONTRIBUTING.md)
- [x] Your additions are ordered alphabetically
- [x] Your submission has a useful description
- [x] The description does not end with punctuation
- [x] Each table column should be padded with one space on either side
- [x] You have searched the repository for any relevant issues or pull requests
- [x] Any category you are creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit
[squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
| https://api.github.com/repos/public-apis/public-apis/pulls/1273 | 2020-05-21T09:49:24Z | 2021-04-17T02:53:25Z | 2021-04-17T02:53:24Z | 2021-04-17T02:53:31Z | 358 | public-apis/public-apis | 35,806 |
New option: Add server certs to client chain | diff --git a/mitmproxy/cmdline.py b/mitmproxy/cmdline.py
index b1b860f83b..d7de350fff 100644
--- a/mitmproxy/cmdline.py
+++ b/mitmproxy/cmdline.py
@@ -434,6 +434,12 @@ def proxy_ssl_options(parser):
action="store_true", dest="no_upstream_cert",
help="Don't connect to upstream server to look up certificate details."
)
+ group.add_argument(
+ "--add-upstream-certs-to-client-chain", default=False,
+ action="store_true", dest="add_upstream_certs_to_client_chain",
+ help="Add all certificates of the upstream server to the certificate chain "
+ "that will be served to the proxy client, as extras."
+ )
group.add_argument(
"--verify-upstream-cert", default=False,
action="store_true", dest="ssl_verify_upstream_cert",
diff --git a/mitmproxy/protocol/tls.py b/mitmproxy/protocol/tls.py
index 6913396d52..7a4d53fe00 100644
--- a/mitmproxy/protocol/tls.py
+++ b/mitmproxy/protocol/tls.py
@@ -432,6 +432,11 @@ def _establish_tls_with_client(self):
self.log("Establish TLS with client", "debug")
cert, key, chain_file = self._find_cert()
+ if self.config.add_upstream_certs_to_client_chain:
+ extra_certs = self.server_conn.server_certs
+ else:
+ extra_certs = None
+
try:
self.client_conn.convert_to_ssl(
cert, key,
@@ -441,6 +446,7 @@ def _establish_tls_with_client(self):
dhparams=self.config.certstore.dhparams,
chain_file=chain_file,
alpn_select_callback=self.__alpn_select_callback,
+ extra_chain_certs = extra_certs,
)
# Some TLS clients will not fail the handshake,
# but will immediately throw an "unexpected eof" error on the first read.
diff --git a/mitmproxy/proxy/config.py b/mitmproxy/proxy/config.py
index 149d471053..bd02c628c5 100644
--- a/mitmproxy/proxy/config.py
+++ b/mitmproxy/proxy/config.py
@@ -67,6 +67,7 @@ def __init__(
ssl_verify_upstream_cert=False,
ssl_verify_upstream_trusted_cadir=None,
ssl_verify_upstream_trusted_ca=None,
+ add_upstream_certs_to_client_chain=False,
):
self.host = host
self.port = port
@@ -107,6 +108,7 @@ def __init__(
self.openssl_verification_mode_server = SSL.VERIFY_NONE
self.openssl_trusted_cadir_server = ssl_verify_upstream_trusted_cadir
self.openssl_trusted_ca_server = ssl_verify_upstream_trusted_ca
+ self.add_upstream_certs_to_client_chain = add_upstream_certs_to_client_chain
def process_proxy_options(parser, options):
@@ -136,14 +138,26 @@ def process_proxy_options(parser, options):
"Transparent, SOCKS5, reverse and upstream proxy mode "
"are mutually exclusive. Read the docs on proxy modes to understand why."
)
-
+ if options.add_upstream_certs_to_client_chain and options.no_upstream_cert:
+ return parser.error(
+ "The no-upstream-cert and add-upstream-certs-to-client-chain "
+ "options are mutually exclusive. If no-upstream-cert is enabled "
+ "then the upstream certificate is not retrieved before generating "
+ "the client certificate chain."
+ )
+ if options.add_upstream_certs_to_client_chain and options.ssl_verify_upstream_cert:
+ return parser.error(
+ "The verify-upstream-cert and add-upstream-certs-to-client-chain "
+ "options are mutually exclusive. If upstream certificates are verified "
+ "then extra upstream certificates are not available for inclusion "
+ "to the client chain."
+ )
if options.clientcerts:
options.clientcerts = os.path.expanduser(options.clientcerts)
if not os.path.exists(options.clientcerts):
return parser.error(
- "Client certificate path does not exist: %s" % options.clientcerts
+ "Client certificate path does not exist: %s" % options.clientcerts
)
-
if options.auth_nonanonymous or options.auth_singleuser or options.auth_htpasswd:
if options.transparent_proxy:
@@ -206,5 +220,6 @@ def process_proxy_options(parser, options):
ssl_version_server=options.ssl_version_server,
ssl_verify_upstream_cert=options.ssl_verify_upstream_cert,
ssl_verify_upstream_trusted_cadir=options.ssl_verify_upstream_trusted_cadir,
- ssl_verify_upstream_trusted_ca=options.ssl_verify_upstream_trusted_ca
+ ssl_verify_upstream_trusted_ca=options.ssl_verify_upstream_trusted_ca,
+ add_upstream_certs_to_client_chain=options.add_upstream_certs_to_client_chain,
)
diff --git a/netlib/tcp.py b/netlib/tcp.py
index 6423888a14..68a7127023 100644
--- a/netlib/tcp.py
+++ b/netlib/tcp.py
@@ -584,6 +584,7 @@ def __init__(self, address, source_address=None):
self.address = address
self.source_address = source_address
self.cert = None
+ self.server_certs = []
self.ssl_verification_error = None
self.sni = None
@@ -668,6 +669,10 @@ def convert_to_ssl(self, sni=None, alpn_protos=None, **sslctx_kwargs):
self.cert = certutils.SSLCert(self.connection.get_peer_certificate())
+ # Keep all server certificates in a list
+ for i in self.connection.get_peer_cert_chain():
+ self.server_certs.append(certutils.SSLCert(i))
+
# Validate TLS Hostname
try:
crt = dict(
@@ -734,6 +739,7 @@ def create_ssl_context(self,
request_client_cert=None,
chain_file=None,
dhparams=None,
+ extra_chain_certs=None,
**sslctx_kwargs):
"""
cert: A certutils.SSLCert object or the path to a certificate
@@ -769,6 +775,10 @@ def create_ssl_context(self,
else:
context.use_certificate_chain_file(cert)
+ if extra_chain_certs:
+ for i in extra_chain_certs:
+ context.add_extra_chain_cert(i.x509)
+
if handle_sni:
# SNI callback happens during do_handshake()
context.set_tlsext_servername_callback(handle_sni)
diff --git a/pathod/pathoc.py b/pathod/pathoc.py
index c0a33b6283..64a81c9454 100644
--- a/pathod/pathoc.py
+++ b/pathod/pathoc.py
@@ -42,7 +42,8 @@ def __str__(self):
"Cipher: %s, %s bit, %s" % self.cipher,
"SSL certificate chain:"
]
- for i in self.certchain:
+ for n,i in enumerate(self.certchain):
+ parts.append(" Certificate [%s]" % n)
parts.append("\tSubject: ")
for cn in i.get_subject().get_components():
parts.append("\t\t%s=%s" % cn)
@@ -69,7 +70,7 @@ def __str__(self):
s = certutils.SSLCert(i)
if s.altnames:
parts.append("\tSANs: %s" % " ".join(s.altnames))
- return "\n".join(parts)
+ return "\n".join(parts)
diff --git a/test/mitmproxy/test_server.py b/test/mitmproxy/test_server.py
index d7b23bbb82..26e53e8aea 100644
--- a/test/mitmproxy/test_server.py
+++ b/test/mitmproxy/test_server.py
@@ -999,3 +999,43 @@ def handler(f):
# (both terminated)
# nothing happened here
assert self.chain[1].tmaster.state.flow_count() == 2
+
+
+class AddUpstreamCertsToClientChainMixin:
+
+ ssl = True
+ servercert = tutils.test_data.path("data/trusted-server.crt")
+ ssloptions = pathod.SSLOptions(
+ cn="trusted-cert",
+ certs=[
+ ("trusted-cert", servercert)
+ ]
+ )
+
+ def test_add_upstream_certs_to_client_chain(self):
+ with open(self.servercert, "rb") as f:
+ d = f.read()
+ upstreamCert = SSLCert.from_pem(d)
+ p = self.pathoc()
+ upstream_cert_found_in_client_chain = False
+ for receivedCert in p.server_certs:
+ if receivedCert.digest('sha256') == upstreamCert.digest('sha256'):
+ upstream_cert_found_in_client_chain = True
+ break
+ assert(upstream_cert_found_in_client_chain == self.add_upstream_certs_to_client_chain)
+
+
+class TestHTTPSAddUpstreamCertsToClientChainTrue(AddUpstreamCertsToClientChainMixin, tservers.HTTPProxyTest):
+
+ """
+ If --add-server-certs-to-client-chain is True, then the client should receive the upstream server's certificates
+ """
+ add_upstream_certs_to_client_chain = True
+
+
+class TestHTTPSAddUpstreamCertsToClientChainFalse(AddUpstreamCertsToClientChainMixin, tservers.HTTPProxyTest):
+
+ """
+ If --add-server-certs-to-client-chain is False, then the client should not receive the upstream server's certificates
+ """
+ add_upstream_certs_to_client_chain = False
diff --git a/test/mitmproxy/tservers.py b/test/mitmproxy/tservers.py
index b7b5de9e89..4fa519cc60 100644
--- a/test/mitmproxy/tservers.py
+++ b/test/mitmproxy/tservers.py
@@ -86,6 +86,7 @@ class ProxyTestBase(object):
no_upstream_cert = False
authenticator = None
masterclass = TestMaster
+ add_upstream_certs_to_client_chain = False
@classmethod
def setup_class(cls):
@@ -129,6 +130,7 @@ def get_proxy_config(cls):
no_upstream_cert = cls.no_upstream_cert,
cadir = cls.cadir,
authenticator = cls.authenticator,
+ add_upstream_certs_to_client_chain = cls.add_upstream_certs_to_client_chain,
)
| If enabled, append all server certificates to the certificate chain
served to the client, as extras. Can be used to test/bypass certain
certificate pinning impementations.
| https://api.github.com/repos/mitmproxy/mitmproxy/pulls/1014 | 2016-03-08T18:15:36Z | 2016-03-17T01:28:00Z | 2016-03-17T01:28:00Z | 2016-03-17T01:28:00Z | 2,358 | mitmproxy/mitmproxy | 28,365 |
Update UPDATES.md | diff --git a/UPDATES.md b/UPDATES.md
index f90b4142a..f3429d838 100644
--- a/UPDATES.md
+++ b/UPDATES.md
@@ -1,3 +1,5 @@
+# 8/7/23 Updates
+
## System Prompt Update
### Observed Issue
@@ -16,4 +18,4 @@ As noted in the documentation, these strings are required to use the fine-tuned
### Updated approach
We recommend sanitizing [these strings](https://github.com/facebookresearch/llama#fine-tuned-chat-models) from any user provided prompts. Sanitization of user prompts mitigates malicious or accidental abuse of these strings. The provided scripts have been updated to do this.
-Note: even with this update safety classifiers should still be applied to catch unsafe behaviors or content produced by the model. An [example](https://github.com/facebookresearch/llama-recipes/blob/main/inference/inference.py) of how to deploy such a classifier can be found in the llama-recipes repository.
\ No newline at end of file
+Note: even with this update safety classifiers should still be applied to catch unsafe behaviors or content produced by the model. An [example](https://github.com/facebookresearch/llama-recipes/blob/main/inference/inference.py) of how to deploy such a classifier can be found in the llama-recipes repository.
| Updating to add the date of the updates.. | https://api.github.com/repos/meta-llama/llama/pulls/664 | 2023-08-11T13:22:26Z | 2023-08-11T14:38:07Z | 2023-08-11T14:38:07Z | 2023-08-11T14:38:07Z | 301 | meta-llama/llama | 31,998 |
Add heroku_multiple_apps rule | diff --git a/README.md b/README.md
index 4a8b5bf77..dd8cfae14 100644
--- a/README.md
+++ b/README.md
@@ -218,6 +218,7 @@ using the matched rule and runs it. Rules enabled by default are as follows:
* `grunt_task_not_found` – fixes misspelled `grunt` commands;
* `gulp_not_task` – fixes misspelled `gulp` tasks;
* `has_exists_script` – prepends `./` when script/binary exists;
+* `heroku_multiple_apps` – add `--app <app>` to `heroku` commands like `heroku pg`;
* `heroku_not_command` – fixes wrong `heroku` commands like `heroku log`;
* `history` – tries to replace command with most similar command from history;
* `hostscli` – tries to fix `hostscli` usage;
diff --git a/tests/rules/test_heroku_multiple_apps.py b/tests/rules/test_heroku_multiple_apps.py
new file mode 100644
index 000000000..c6b4f1e5f
--- /dev/null
+++ b/tests/rules/test_heroku_multiple_apps.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+from thefuck.types import Command
+from thefuck.rules.heroku_multiple_apps import match, get_new_command
+
+
+suggest_output = '''
+ ▸ Multiple apps in git remotes
+ ▸ Usage: --remote heroku-dev
+ ▸ or: --app myapp-dev
+ ▸ Your local git repository has more than 1 app referenced in git remotes.
+ ▸ Because of this, we can't determine which app you want to run this command against.
+ ▸ Specify the app you want with --app or --remote.
+ ▸ Heroku remotes in repo:
+ ▸ myapp (heroku)
+ ▸ myapp-dev (heroku-dev)
+ ▸
+ ▸ https://devcenter.heroku.com/articles/multiple-environments
+'''
+
+not_match_output = '''
+=== HEROKU_POSTGRESQL_TEAL_URL, DATABASE_URL
+Plan: Hobby-basic
+Status: Available
+Connections: 20/20
+PG Version: 9.6.4
+Created: 2017-01-01 00:00 UTC
+Data Size: 99.9 MB
+Tables: 99
+Rows: 12345/10000000 (In compliance)
+Fork/Follow: Unsupported
+Rollback: Unsupported
+Continuous Protection: Off
+Add-on: postgresql-round-12345
+'''
+
+
[email protected]('cmd', ['pg'])
+def test_match(cmd):
+ assert match(
+ Command('heroku {}'.format(cmd), suggest_output))
+
+
[email protected]('script, output', [
+ ('heroku pg', not_match_output)])
+def test_not_match(script, output):
+ assert not match(Command(script, output))
+
+
[email protected]('cmd, result', [
+ ('pg', ['heroku pg --app myapp', 'heroku pg --app myapp-dev'])])
+def test_get_new_command(cmd, result):
+ command = Command('heroku {}'.format(cmd), suggest_output)
+ assert get_new_command(command) == result
diff --git a/thefuck/rules/heroku_multiple_apps.py b/thefuck/rules/heroku_multiple_apps.py
new file mode 100644
index 000000000..268968bb6
--- /dev/null
+++ b/thefuck/rules/heroku_multiple_apps.py
@@ -0,0 +1,12 @@
+import re
+from thefuck.utils import for_app
+
+
+@for_app('heroku')
+def match(command):
+ return 'https://devcenter.heroku.com/articles/multiple-environments' in command.output
+
+
+def get_new_command(command):
+ apps = re.findall('([^ ]*) \([^)]*\)', command.output)
+ return [command.script + ' --app ' + app for app in apps]
| Closes https://github.com/nvbn/thefuck/issues/728 | https://api.github.com/repos/nvbn/thefuck/pulls/729 | 2017-11-07T01:13:54Z | 2017-11-09T23:42:24Z | 2017-11-09T23:42:23Z | 2018-03-06T16:08:00Z | 945 | nvbn/thefuck | 30,882 |
stabilized multi account for apigw test_invoke_method | diff --git a/localstack/services/apigateway/provider.py b/localstack/services/apigateway/provider.py
index 6ce01ecf8df30..0bb65a40f860a 100644
--- a/localstack/services/apigateway/provider.py
+++ b/localstack/services/apigateway/provider.py
@@ -178,6 +178,8 @@ def test_invoke_method(
invocation_context.method = request.get("httpMethod")
invocation_context.api_id = request.get("restApiId")
invocation_context.path_with_query_string = request.get("pathWithQueryString")
+ invocation_context.region_name = context.region
+ invocation_context.account_id = context.account_id
moto_rest_api = get_moto_rest_api(context=context, rest_api_id=invocation_context.api_id)
resource = moto_rest_api.resources.get(request["resourceId"])
| <!-- Please refer to the contribution guidelines before raising a PR: https://github.com/localstack/localstack/blob/master/CONTRIBUTING.md -->
<!-- Why am I raising this PR? Add context such as related issues, PRs, or documentation. -->
## Motivation
This PR improves the multi-account functionality for the API Gateway `test_invoke_method`. Previously, the `account_id` within the `invocation_context` was not being set and defaulted to "none". To rectify this, the `account_id` and `region` are now set from the `context` variable, with reference to the `invocation_context.api_id`.
<!-- What notable changes does this PR make? -->
## Changes
The `invocation_context` object's `account_id` and `region` parameters have been updated to reflect accurate information extracted from the API ID.
## Testing
The tests were failing previously when executed with a non-default account ID, set through environment variables (`TEST_AWS_ACCOUNT_ID=111111111111, TEST_AWS_ACCESS_KEY_ID=111111111111, TEST_AWS_REGION=us-west-1`). The affected tests are:
- `tests.aws.services.apigateway.test_apigateway_api.TestApiGatewayApi.test_invoke_test_method`
## TODO
- [ ] Validate the changes by running the -ext test suite | https://api.github.com/repos/localstack/localstack/pulls/9547 | 2023-11-03T10:00:56Z | 2023-11-07T12:03:00Z | 2023-11-07T12:03:00Z | 2023-11-07T12:03:01Z | 179 | localstack/localstack | 28,686 |
[email] bpo-29478: Fix passing max_line_length=None from Compat32 policy | diff --git a/Lib/email/_policybase.py b/Lib/email/_policybase.py
index df4649676aed72..c9cbadd2a80c48 100644
--- a/Lib/email/_policybase.py
+++ b/Lib/email/_policybase.py
@@ -361,8 +361,12 @@ def _fold(self, name, value, sanitize):
# Assume it is a Header-like object.
h = value
if h is not None:
- parts.append(h.encode(linesep=self.linesep,
- maxlinelen=self.max_line_length))
+ # The Header class interprets a value of None for maxlinelen as the
+ # default value of 78, as recommended by RFC 2822.
+ maxlinelen = 0
+ if self.max_line_length is not None:
+ maxlinelen = self.max_line_length
+ parts.append(h.encode(linesep=self.linesep, maxlinelen=maxlinelen))
parts.append(self.linesep)
return ''.join(parts)
diff --git a/Lib/test/test_email/test_generator.py b/Lib/test/test_email/test_generator.py
index 7c8877fdcb090e..c4f182903afefe 100644
--- a/Lib/test/test_email/test_generator.py
+++ b/Lib/test/test_email/test_generator.py
@@ -162,6 +162,13 @@ def test_set_mangle_from_via_policy(self):
g.flatten(msg)
self.assertEqual(s.getvalue(), self.typ(expected))
+ def test_compat32_max_line_length_does_not_fold_when_none(self):
+ msg = self.msgmaker(self.typ(self.refold_long_expected[0]))
+ s = self.ioclass()
+ g = self.genclass(s, policy=policy.compat32.clone(max_line_length=None))
+ g.flatten(msg)
+ self.assertEqual(s.getvalue(), self.typ(self.refold_long_expected[0]))
+
class TestGenerator(TestGeneratorBase, TestEmailBase):
diff --git a/Misc/ACKS b/Misc/ACKS
index 74ac15b89ece6b..1ae9b6fe5d0a2e 100644
--- a/Misc/ACKS
+++ b/Misc/ACKS
@@ -310,6 +310,7 @@ Garrett Cooper
Greg Copeland
Ian Cordasco
Aldo Cortesi
+Mircea Cosbuc
David Costanzo
Scott Cotton
Greg Couch
diff --git a/Misc/NEWS b/Misc/NEWS
index 98d8ef96b3e6c2..88f1631c315761 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -142,6 +142,9 @@ Core and Builtins
- bpo-29546: Improve from-import error message with location
+- bpo-29478: If max_line_length=None is specified while using the Compat32 policy,
+ it is no longer ignored. Patch by Mircea Cosbuc.
+
- Issue #29319: Prevent RunMainFromImporter overwriting sys.path[0].
- Issue #29337: Fixed possible BytesWarning when compare the code objects.
| This fixes an [issue](https://bugs.python.org/issue29478) with the `Compat32` email policy where setting the `max_line_length` attribute to `None` should prevent line wrapping as specified in the [docs](https://docs.python.org/release/3.5.2/library/email.policy.html#email.policy.Policy.max_line_length). | https://api.github.com/repos/python/cpython/pulls/595 | 2017-03-10T11:20:14Z | 2017-06-12T06:43:42Z | 2017-06-12T06:43:42Z | 2017-06-16T02:08:56Z | 690 | python/cpython | 4,193 |
Fixed typo | diff --git a/nodepad/README.md b/nodepad/README.md
index cd9465c7d0..e3fd99dfb9 100644
--- a/nodepad/README.md
+++ b/nodepad/README.md
@@ -1,7 +1,7 @@
# Simple note managment
This app is a simple note managment (notepad). You can write notes and save it with a title. In addition you can pull previous notes back.
-The app is writen in **python 2** and uses [Tkinter](https://docs.python.org/2/library/tkinter.html) as gui-librarie. Furthermore the UI was created with the gui-builder [PAGE](http://page.sourceforge.net/). The app uses the [Sqlite](https://www.sqlite.org/) database for managing the notes.
+The app is writen in **python 2** and uses [Tkinter](https://docs.python.org/2/library/tkinter.html) as gui-library. Furthermore the UI was created with the gui-builder [PAGE](http://page.sourceforge.net/). The app uses the [Sqlite](https://www.sqlite.org/) database for managing the notes.
### Dependencies
@@ -23,4 +23,4 @@ You add new notes over the tab Add and displays the notes over the tab display.
### How it looks like?
-![screenshot of the app](img/screenshot.png "")
\ No newline at end of file
+![screenshot of the app](img/screenshot.png "")
| https://api.github.com/repos/geekcomputers/Python/pulls/721 | 2020-04-24T19:19:32Z | 2020-04-25T11:32:01Z | 2020-04-25T11:32:01Z | 2020-04-25T11:32:01Z | 324 | geekcomputers/Python | 31,495 |
|
🌐 Add Japanese translation for Project Generation | diff --git a/docs/ja/docs/project-generation.md b/docs/ja/docs/project-generation.md
new file mode 100644
index 0000000000000..4b6f0f9fd0cc6
--- /dev/null
+++ b/docs/ja/docs/project-generation.md
@@ -0,0 +1,84 @@
+# プロジェクト生成 - テンプレート
+
+プロジェクトジェネレーターは、初期設定、セキュリティ、データベース、初期APIエンドポイントなどの多くが含まれているため、プロジェクトの開始に利用できます。
+
+プロジェクトジェネレーターは常に非常に意見が分かれる設定がされており、ニーズに合わせて更新および調整する必要があります。しかしきっと、プロジェクトの良い出発点となるでしょう。
+
+## フルスタック FastAPI PostgreSQL
+
+GitHub: <a href="https://github.com/tiangolo/full-stack-fastapi-postgresql" class="external-link" target="_blank">https://github.com/tiangolo/full-stack-fastapi-postgresql</a>
+
+### フルスタック FastAPI PostgreSQL - 機能
+
+* 完全な**Docker**インテグレーション (Dockerベース)。
+* Docker Swarm モードデプロイ。
+* ローカル開発環境向けの**Docker Compose**インテグレーションと最適化。
+* UvicornとGunicornを使用した**リリース可能な** Python web サーバ。
+* Python <a href="https://github.com/tiangolo/fastapi" class="external-link" target="_blank">**FastAPI**</a> バックエンド:
+ * **高速**: **NodeJS** や **Go** 並みのとても高いパフォーマンス (Starlette と Pydantic のおかげ)。
+ * **直感的**: 素晴らしいエディタのサポートや <abbr title="自動補完、インテリセンスとも呼ばれる">補完。</abbr> デバッグ時間の短縮。
+ * **簡単**: 簡単に利用、習得できるようなデザイン。ドキュメントを読む時間を削減。
+ * **短い**: コードの重複を最小限に。パラメータ宣言による複数の機能。
+ * **堅牢性**: 自動対話ドキュメントを使用した、本番環境で使用できるコード。
+ * **標準規格準拠**: API のオープンスタンダードに基く、完全な互換性: <a href="https://github.com/OAI/OpenAPI-Specification" class="external-link" target="_blank">OpenAPI</a>や <a href="http://json-schema.org/" class="external-link" target="_blank">JSON スキーマ</a>。
+ * 自動バリデーション、シリアライゼーション、対話的なドキュメント、OAuth2 JWTトークンを用いた認証などを含む、<a href="https://fastapi.tiangolo.com/features/" class="external-link" target="_blank">**その他多くの機能**</a>。
+* **セキュアなパスワード** ハッシュ化 (デフォルトで)。
+* **JWTトークン** 認証。
+* **SQLAlchemy** モデル (Flask用の拡張と独立しているので、Celeryワーカーと直接的に併用できます)。
+* 基本的なユーザーモデル (任意の修正や削除が可能)。
+* **Alembic** マイグレーション。
+* **CORS** (Cross Origin Resource Sharing (オリジン間リソース共有))。
+* **Celery** ワーカー。バックエンドの残りの部分からモデルとコードを選択的にインポートし、使用可能。
+* Dockerと統合された**Pytest**ベースのRESTバックエンドテスト。データベースに依存せずに、全てのAPIをテスト可能。Docker上で動作するので、毎回ゼロから新たなデータストアを構築可能。(ElasticSearch、MongoDB、CouchDBなどを使用して、APIの動作をテスト可能)
+* Atom HydrogenやVisual Studio Code Jupyterなどの拡張機能を使用した、リモートまたはDocker開発用の**Jupyterカーネル**との簡単なPython統合。
+* **Vue** フロントエンド:
+ * Vue CLIにより生成。
+ * **JWT認証**の処理。
+ * ログインビュー。
+ * ログイン後の、メインダッシュボードビュー。
+ * メインダッシュボードでのユーザー作成と編集。
+ * セルフユーザー版
+ * **Vuex**。
+ * **Vue-router**。
+ * 美しいマテリアルデザインコンポーネントのための**Vuetify**。
+ * **TypeScript**。
+ * **Nginx**ベースのDockerサーバ (Vue-routerとうまく協調する構成)。
+ * Dockerマルチステージビルド。コンパイルされたコードの保存やコミットが不要。
+ * ビルド時にフロントエンドテスト実行 (無効化も可能)。
+ * 可能な限りモジュール化されているのでそのまま使用できますが、Vue CLIで再生成したり、必要に応じて作成したりして、必要なものを再利用可能。
+* PostgreSQLデータベースのための**PGAdmin**。(PHPMyAdminとMySQLを使用できるように簡単に変更可能)
+* Celeryジョブ監視のための**Flower**。
+* **Traefik**を使用してフロントエンドとバックエンド間をロードバランシング。同一ドメインに配置しパスで区切る、ただし、異なるコンテナで処理。
+* Traefik統合。Let's Encrypt **HTTPS**証明書の自動生成を含む。
+* GitLab **CI** (継続的インテグレーション)。フロントエンドおよびバックエンドテストを含む。
+
+## フルスタック FastAPI Couchbase
+
+GitHub: <a href="https://github.com/tiangolo/full-stack-fastapi-couchbase" class="external-link" target="_blank">https://github.com/tiangolo/full-stack-fastapi-couchbase</a>
+
+⚠️ **警告** ⚠️
+
+ゼロから新規プロジェクトを始める場合は、ここで代替案を確認してください。
+
+例えば、<a href="https://github.com/tiangolo/full-stack-fastapi-postgresql" class="external-link" target="_blank">フルスタック FastAPI PostgreSQL</a>のプロジェクトジェネレーターは、積極的にメンテナンスされ、利用されているのでより良い代替案かもしれません。また、すべての新機能と改善点が含まれています。
+
+Couchbaseベースのジェネレーターは今も無償提供されています。恐らく正常に動作するでしょう。また、すでにそのジェネレーターで生成されたプロジェクトが存在する場合でも (ニーズに合わせてアップデートしているかもしれません)、同様に正常に動作するはずです。
+
+詳細はレポジトリのドキュメントを参照して下さい。
+
+## フルスタック FastAPI MongoDB
+
+...時間の都合等によっては、今後作成されるかもしれません。😅 🎉
+
+## spaCyとFastAPIを使用した機械学習モデル
+
+GitHub: <a href="https://github.com/microsoft/cookiecutter-spacy-fastapi" class="external-link" target="_blank">https://github.com/microsoft/cookiecutter-spacy-fastapi</a>
+
+### spaCyとFastAPIを使用した機械学習モデル - 機能
+
+* **spaCy** のNERモデルの統合。
+* **Azure Cognitive Search** のリクエストフォーマットを搭載。
+* **リリース可能な** UvicornとGunicornを使用したPythonウェブサーバ。
+* **Azure DevOps** のKubernetes (AKS) CI/CD デプロイを搭載。
+* **多言語** プロジェクトのために、セットアップ時に言語を容易に選択可能 (spaCyに組み込まれている言語の中から)。
+* **簡単に拡張可能**。spaCyだけでなく、他のモデルフレームワーク (Pytorch、Tensorflow) へ。
diff --git a/docs/ja/mkdocs.yml b/docs/ja/mkdocs.yml
index 5b2c147f6758d..22ef92b373fef 100644
--- a/docs/ja/mkdocs.yml
+++ b/docs/ja/mkdocs.yml
@@ -44,6 +44,7 @@ nav:
- tutorial/query-params.md
- tutorial/body.md
- tutorial/header-params.md
+- project-generation.md
- alternatives.md
- history-design-future.md
- benchmarks.md
| This PR translates the project-generation.md to Japanese.
@ryuckel @SwftAlpc @komtaki @Attsun1031 and others
Feel free to comment your advise or approve. | https://api.github.com/repos/tiangolo/fastapi/pulls/2050 | 2020-09-14T16:48:53Z | 2020-10-18T05:47:45Z | 2020-10-18T05:47:45Z | 2020-10-18T07:53:49Z | 2,122 | tiangolo/fastapi | 23,329 |
mock<1.1.0 only for py2.6. | diff --git a/acme/setup.py b/acme/setup.py
index 6d820841490..5f1da239149 100644
--- a/acme/setup.py
+++ b/acme/setup.py
@@ -9,7 +9,6 @@
# load_pem_private/public_key (>=0.6)
# rsa_recover_prime_factors (>=0.8)
'cryptography>=0.8',
- 'mock<1.1.0', # py26
'pyrfc3339',
'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304)
'pyasn1', # urllib3 InsecurePlatformWarning (#304)
@@ -23,8 +22,13 @@
# env markers in extras_require cause problems with older pip: #517
if sys.version_info < (2, 7):
- # only some distros recognize stdlib argparse as already satisfying
- install_requires.append('argparse')
+ install_requires.extend([
+ # only some distros recognize stdlib argparse as already satisfying
+ 'argparse',
+ 'mock<1.1.0',
+ ])
+else:
+ install_requires.append('mock')
testing_extras = [
'nose',
diff --git a/letsencrypt-apache/setup.py b/letsencrypt-apache/setup.py
index 39f4b68e1f9..5f1b0a95d21 100644
--- a/letsencrypt-apache/setup.py
+++ b/letsencrypt-apache/setup.py
@@ -1,3 +1,5 @@
+import sys
+
from setuptools import setup
from setuptools import find_packages
@@ -5,12 +7,16 @@
install_requires = [
'acme',
'letsencrypt',
- 'mock<1.1.0', # py26
'python-augeas',
'zope.component',
'zope.interface',
]
+if sys.version_info < (2, 7):
+ install_requires.append('mock<1.1.0')
+else:
+ install_requires.append('mock')
+
setup(
name='letsencrypt-apache',
packages=find_packages(),
diff --git a/letsencrypt-compatibility-test/setup.py b/letsencrypt-compatibility-test/setup.py
index f02041e55aa..99e66f54f0e 100644
--- a/letsencrypt-compatibility-test/setup.py
+++ b/letsencrypt-compatibility-test/setup.py
@@ -1,3 +1,5 @@
+import sys
+
from setuptools import setup
from setuptools import find_packages
@@ -7,10 +9,14 @@
'letsencrypt-apache',
'letsencrypt-nginx',
'docker-py',
- 'mock<1.1.0', # py26
'zope.interface',
]
+if sys.version_info < (2, 7):
+ install_requires.append('mock<1.1.0')
+else:
+ install_requires.append('mock')
+
setup(
name='letsencrypt-compatibility-test',
packages=find_packages(),
diff --git a/letsencrypt-nginx/setup.py b/letsencrypt-nginx/setup.py
index 92b974974b6..64742e7b6e2 100644
--- a/letsencrypt-nginx/setup.py
+++ b/letsencrypt-nginx/setup.py
@@ -1,3 +1,5 @@
+import sys
+
from setuptools import setup
from setuptools import find_packages
@@ -6,10 +8,14 @@
'acme',
'letsencrypt',
'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?
- 'mock<1.1.0', # py26
'zope.interface',
]
+if sys.version_info < (2, 7):
+ install_requires.append('mock<1.1.0')
+else:
+ install_requires.append('mock')
+
setup(
name='letsencrypt-nginx',
packages=find_packages(),
diff --git a/letshelp-letsencrypt/setup.py b/letshelp-letsencrypt/setup.py
index 6b89a6d0920..7d42af88b8d 100644
--- a/letshelp-letsencrypt/setup.py
+++ b/letshelp-letsencrypt/setup.py
@@ -6,17 +6,17 @@
install_requires = []
if sys.version_info < (2, 7):
- install_requires.append("mock<1.1.0")
+ install_requires.append('mock<1.1.0')
else:
- install_requires.append("mock")
+ install_requires.append('mock')
setup(
- name="letshelp-letsencrypt",
+ name='letshelp-letsencrypt',
packages=find_packages(),
install_requires=install_requires,
entry_points={
'console_scripts': [
- "letshelp-letsencrypt-apache = letshelp_letsencrypt.apache:main",
+ 'letshelp-letsencrypt-apache = letshelp_letsencrypt.apache:main',
],
},
include_package_data=True,
diff --git a/setup.py b/setup.py
index a40303e509f..5a7d75a5fe5 100644
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,6 @@ def read_file(filename, encoding='utf8'):
'ConfigArgParse',
'configobj',
'cryptography>=0.7', # load_pem_x509_certificate
- 'mock<1.1.0', # py26
'parsedatetime',
'psutil>=2.1.0', # net_connections introduced in 2.1.0
'PyOpenSSL',
@@ -47,8 +46,13 @@ def read_file(filename, encoding='utf8'):
# env markers in extras_require cause problems with older pip: #517
if sys.version_info < (2, 7):
- # only some distros recognize stdlib argparse as already satisfying
- install_requires.append('argparse')
+ install_requires.extend([
+ # only some distros recognize stdlib argparse as already satisfying
+ 'argparse',
+ 'mock<1.1.0',
+ ])
+else:
+ install_requires.append('mock')
dev_extras = [
# Pin astroid==1.3.5, pylint==1.4.2 as a workaround for #289
| https://api.github.com/repos/certbot/certbot/pulls/708 | 2015-08-24T08:12:20Z | 2015-08-27T16:22:16Z | 2015-08-27T16:22:16Z | 2016-05-06T19:21:33Z | 1,425 | certbot/certbot | 933 |
|
see if pygit2 is fixed by changing a commit | diff --git a/fooocus_version.py b/fooocus_version.py
index dacf52b62..a4b7a399d 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.0.86'
+version = '2.0.87'
diff --git a/launch.py b/launch.py
index 900efc3d9..06184ef3f 100644
--- a/launch.py
+++ b/launch.py
@@ -22,7 +22,7 @@ def prepare_environment():
xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
comfy_repo = os.environ.get('COMFY_REPO', "https://github.com/comfyanonymous/ComfyUI")
- comfy_commit_hash = os.environ.get('COMFY_COMMIT_HASH', "2ef459b1d4d627929c84d11e5e0cbe3ded9c9f48")
+ comfy_commit_hash = os.environ.get('COMFY_COMMIT_HASH', "9bfec2bdbf0b0d778087a9b32f79e57e2d15b913")
print(f"Python {sys.version}")
print(f"Fooocus version: {fooocus_version.version}")
| https://api.github.com/repos/lllyasviel/Fooocus/pulls/538 | 2023-10-04T20:21:16Z | 2023-10-04T20:21:31Z | 2023-10-04T20:21:31Z | 2023-10-04T20:21:33Z | 292 | lllyasviel/Fooocus | 7,244 |
|
reword extension python version item | diff --git a/docs/extensiondev.rst b/docs/extensiondev.rst
index fb76759498..2836d78dab 100644
--- a/docs/extensiondev.rst
+++ b/docs/extensiondev.rst
@@ -323,9 +323,10 @@ ecosystem remain consistent and compatible.
6. The documentation must use the ``flask`` theme from the
`Official Pallets Themes`_. A link to the documentation or project
website must be in the PyPI metadata or the readme.
-7. The active versions of Python should be supported. As of 2020 this
- means Python 3.5 and newer.
-
+7. For maximum compatibility, the extension should support the same
+ versions of Python that Flask supports. 3.6+ is recommended as of
+ 2020. Use ``python_requires=">= 3.6"`` in ``setup.py`` to indicate
+ supported versions.
.. _PyPI: https://pypi.org/search/?c=Framework+%3A%3A+Flask
.. _OAuth extension: https://pythonhosted.org/Flask-OAuth/
| Based on https://github.com/pallets/flask/pull/3374#discussion_r327215251. This isn't a hard requirement, so I'm using "should" instead of "must". | https://api.github.com/repos/pallets/flask/pulls/3375 | 2019-09-23T19:02:05Z | 2019-09-23T19:10:30Z | 2019-09-23T19:10:30Z | 2020-11-14T01:52:40Z | 256 | pallets/flask | 20,528 |
gh-107963: Fix set_forkserver_preload to check the type of given list | diff --git a/Lib/multiprocessing/forkserver.py b/Lib/multiprocessing/forkserver.py
index 22a911a7a29cdc..4642707dae2f4e 100644
--- a/Lib/multiprocessing/forkserver.py
+++ b/Lib/multiprocessing/forkserver.py
@@ -61,7 +61,7 @@ def _stop_unlocked(self):
def set_forkserver_preload(self, modules_names):
'''Set list of module names to try to load in forkserver process.'''
- if not all(type(mod) is str for mod in self._preload_modules):
+ if not all(type(mod) is str for mod in modules_names):
raise TypeError('module_names must be a list of strings')
self._preload_modules = modules_names
diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py
index f881a5d4674699..10754964e73bc5 100644
--- a/Lib/test/_test_multiprocessing.py
+++ b/Lib/test/_test_multiprocessing.py
@@ -5369,6 +5369,14 @@ def test_context(self):
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
+ def test_context_check_module_types(self):
+ try:
+ ctx = multiprocessing.get_context('forkserver')
+ except ValueError:
+ raise unittest.SkipTest('forkserver should be available')
+ with self.assertRaisesRegex(TypeError, 'module_names must be a list of strings'):
+ ctx.set_forkserver_preload([1, 2, 3])
+
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
diff --git a/Misc/NEWS.d/next/Library/2023-08-15-18-20-00.gh-issue-107963.20g5BG.rst b/Misc/NEWS.d/next/Library/2023-08-15-18-20-00.gh-issue-107963.20g5BG.rst
new file mode 100644
index 00000000000000..3a73b2da0c4334
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-08-15-18-20-00.gh-issue-107963.20g5BG.rst
@@ -0,0 +1,2 @@
+Fix :func:`multiprocessing.set_forkserver_preload` to check the given list
+of modules names. Patch by Dong-hee Na.
| <!--
Thanks for your contribution!
Please read this comment in its entirety. It's quite important.
# Pull Request title
It should be in the following format:
```
gh-NNNNN: Summary of the changes made
```
Where: gh-NNNNN refers to the GitHub issue number.
Most PRs will require an issue number. Trivial changes, like fixing a typo, do not need an issue.
# Backport Pull Request title
If this is a backport PR (PR made against branches other than `main`),
please ensure that the PR title is in the following format:
```
[X.Y] <title from the original PR> (GH-NNNN)
```
Where: [X.Y] is the branch name, e.g. [3.6].
GH-NNNN refers to the PR number from `main`.
-->
<!-- gh-issue-number: gh-107963 -->
* Issue: gh-107963
<!-- /gh-issue-number -->
| https://api.github.com/repos/python/cpython/pulls/107965 | 2023-08-15T08:58:21Z | 2023-08-15T13:58:13Z | 2023-08-15T13:58:13Z | 2023-08-15T23:51:42Z | 565 | python/cpython | 4,443 |
Fixed Inappropriate Logical Expression | diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py
index 4ebe0e927ca0..fc299301da8a 100644
--- a/data_structures/binary_tree/red_black_tree.py
+++ b/data_structures/binary_tree/red_black_tree.py
@@ -451,7 +451,7 @@ def is_left(self) -> bool:
"""Returns true iff this node is the left child of its parent."""
if self.parent is None:
return False
- return self.parent.left is self.parent.left is self
+ return self.parent.left is self
def is_right(self) -> bool:
"""Returns true iff this node is the right child of its parent."""
| ### Describe your change:
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Add or change doctests? -- Note: Please avoid changing both code and tests in a single pull request.
* [ ] Documentation change?
---
## Details
While triaging your project, our bug fixing tool generated the following message(s)-
> In file: [red_black_tree.py](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/red_black_tree.py#L454), method: `is_left`, a logical equality check operation was performed with the same operand on both sides. The comparison operation always returns either true or false. Such logical short circuits in code lead to unintended behavior. iCR suggested that the logical operation should be reviewed for correctness.
The expression `self.parent.left is self.parent.left is self` is not clear in its intent. The chaining of is operators can make it hard to understand what exactly is being checked.
## CLA Requirements
*This section is only relevant if your project requires contributors to sign a Contributor License Agreement (CLA) for external contributions.*
All contributed commits are already automatically signed off.
> The meaning of a signoff depends on the project, but it typically certifies that committer has the rights to submit this work under the same license and agrees to a Developer Certificate of Origin (see [https://developercertificate.org/](https://developercertificate.org/) for more information).
\- [Git Commit SignOff documentation](https://developercertificate.org/)
## Sponsorship and Support
This work is done by the security researchers from OpenRefactory and is supported by the [Open Source Security Foundation (OpenSSF)](https://openssf.org/): [Project Alpha-Omega](https://alpha-omega.dev/). Alpha-Omega is a project partnering with open source software project maintainers to systematically find new, as-yet-undiscovered vulnerabilities in open source code - and get them fixed – to improve global software supply chain security.
The bug is found by running the Intelligent Code Repair (iCR) tool by OpenRefactory and then manually triaging the results.
---
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms include at least one URL that points to Wikipedia or another similar explanation.
* [x] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): "Fixes #ISSUE-NUMBER".
| https://api.github.com/repos/TheAlgorithms/Python/pulls/11203 | 2023-12-07T11:53:15Z | 2024-01-16T08:43:33Z | 2024-01-16T08:43:33Z | 2024-01-16T08:43:34Z | 166 | TheAlgorithms/Python | 29,881 |
Adds NOAA Climate Data Online API | diff --git a/README.md b/README.md
index f40367825a..b875238ed8 100644
--- a/README.md
+++ b/README.md
@@ -784,6 +784,7 @@ API | Description | Auth | HTTPS | CORS | Link |
| ClimaCell Micro Weather | Historical, real-time, and nowcast weather data | `apiKey` | Yes | Yes | [Go!](https://developer.climacell.co) |
| Dark Sky | Weather | `apiKey` | Yes | No | [Go!](https://darksky.net/dev/) |
| MetaWeather | Weather | No | Yes | No | [Go!](https://www.metaweather.com/api/) |
+| NOAA Climate Data | Weather and climate data | `apiKey` | Yes | Unknown | [Go!](https://www.ncdc.noaa.gov/cdo-web/) |
| ODWeather | Weather and weather webcams | No | No | Unknown | [Go!](http://api.oceandrivers.com/static/docs.html) |
| OpenUV | Real-time UV Index Forecast | `apiKey` | Yes | Unknown | [Go!](https://www.openuv.io) |
| OpenWeatherMap | Weather | `apiKey` | No | Unknown | [Go!](http://openweathermap.org/api) |
| Thank you for taking the time to work on a Pull Request for this project!
To ensure your PR is dealt with swiftly please check the following:
- [x] Your submissions are formatted according to the guidelines in the [contributing guide](CONTRIBUTING.md)
- [x] Your additions are ordered alphabetically
- [x] Your submission has a useful description
- [x] The description does not end with punctuation
- [x] Each table column should be padded with one space on either side
- [x] You have searched the repository for any relevant issues or pull requests
- [x] Any category you are creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit
[squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
| https://api.github.com/repos/public-apis/public-apis/pulls/712 | 2018-07-07T14:31:45Z | 2018-07-13T02:49:00Z | 2018-07-13T02:49:00Z | 2018-07-14T13:59:03Z | 282 | public-apis/public-apis | 35,319 |
update space_video_api arg: page size | diff --git a/src/you_get/extractors/bilibili.py b/src/you_get/extractors/bilibili.py
index 5cd47e1050..49334d5bd7 100644
--- a/src/you_get/extractors/bilibili.py
+++ b/src/you_get/extractors/bilibili.py
@@ -121,7 +121,7 @@ def bilibili_space_favlist_api(fid, pn=1, ps=20):
return 'https://api.bilibili.com/x/v3/fav/resource/list?media_id=%s&pn=%s&ps=%s&order=mtime&type=0&tid=0&jsonp=jsonp' % (fid, pn, ps)
@staticmethod
- def bilibili_space_video_api(mid, pn=1, ps=100):
+ def bilibili_space_video_api(mid, pn=1, ps=50):
return "https://api.bilibili.com/x/space/arc/search?mid=%s&pn=%s&ps=%s&tid=0&keyword=&order=pubdate&jsonp=jsonp" % (mid, pn, ps)
@staticmethod
| error msg: Key: 'SearchArg.Ps' Error:Field validation for 'Ps' failed on the 'lte' tag | https://api.github.com/repos/soimort/you-get/pulls/2951 | 2022-03-10T18:38:57Z | 2022-03-17T15:35:48Z | 2022-03-17T15:35:48Z | 2022-03-17T18:06:37Z | 256 | soimort/you-get | 21,476 |
Use VacuumEntityFeature in roomba | diff --git a/homeassistant/components/roomba/braava.py b/homeassistant/components/roomba/braava.py
index 90298078e423..ea08829cba6e 100644
--- a/homeassistant/components/roomba/braava.py
+++ b/homeassistant/components/roomba/braava.py
@@ -1,7 +1,7 @@
"""Class for Braava devices."""
import logging
-from homeassistant.components.vacuum import SUPPORT_FAN_SPEED
+from homeassistant.components.vacuum import VacuumEntityFeature
from .irobot_base import SUPPORT_IROBOT, IRobotVacuum
@@ -23,7 +23,7 @@
BRAAVA_SPRAY_AMOUNT = [1, 2, 3]
# Braava Jets can set mopping behavior through fanspeed
-SUPPORT_BRAAVA = SUPPORT_IROBOT | SUPPORT_FAN_SPEED
+SUPPORT_BRAAVA = SUPPORT_IROBOT | VacuumEntityFeature.FAN_SPEED
class BraavaJet(IRobotVacuum):
diff --git a/homeassistant/components/roomba/irobot_base.py b/homeassistant/components/roomba/irobot_base.py
index d7e3266f5fa1..ab0c1f3f842e 100644
--- a/homeassistant/components/roomba/irobot_base.py
+++ b/homeassistant/components/roomba/irobot_base.py
@@ -10,16 +10,8 @@
STATE_DOCKED,
STATE_ERROR,
STATE_RETURNING,
- SUPPORT_BATTERY,
- SUPPORT_LOCATE,
- SUPPORT_PAUSE,
- SUPPORT_RETURN_HOME,
- SUPPORT_SEND_COMMAND,
- SUPPORT_START,
- SUPPORT_STATE,
- SUPPORT_STATUS,
- SUPPORT_STOP,
StateVacuumEntity,
+ VacuumEntityFeature,
)
from homeassistant.const import STATE_IDLE, STATE_PAUSED
import homeassistant.helpers.device_registry as dr
@@ -40,15 +32,15 @@
# Commonly supported features
SUPPORT_IROBOT = (
- SUPPORT_BATTERY
- | SUPPORT_PAUSE
- | SUPPORT_RETURN_HOME
- | SUPPORT_SEND_COMMAND
- | SUPPORT_START
- | SUPPORT_STATE
- | SUPPORT_STATUS
- | SUPPORT_STOP
- | SUPPORT_LOCATE
+ VacuumEntityFeature.BATTERY
+ | VacuumEntityFeature.PAUSE
+ | VacuumEntityFeature.RETURN_HOME
+ | VacuumEntityFeature.SEND_COMMAND
+ | VacuumEntityFeature.START
+ | VacuumEntityFeature.STATE
+ | VacuumEntityFeature.STATUS
+ | VacuumEntityFeature.STOP
+ | VacuumEntityFeature.LOCATE
)
STATE_MAP = {
diff --git a/homeassistant/components/roomba/roomba.py b/homeassistant/components/roomba/roomba.py
index 5f960aeaae0e..7cac9a3ba52b 100644
--- a/homeassistant/components/roomba/roomba.py
+++ b/homeassistant/components/roomba/roomba.py
@@ -1,7 +1,7 @@
"""Class for Roomba devices."""
import logging
-from homeassistant.components.vacuum import SUPPORT_FAN_SPEED
+from homeassistant.components.vacuum import VacuumEntityFeature
from .irobot_base import SUPPORT_IROBOT, IRobotVacuum
@@ -16,7 +16,7 @@
FAN_SPEEDS = [FAN_SPEED_AUTOMATIC, FAN_SPEED_ECO, FAN_SPEED_PERFORMANCE]
# Only Roombas with CarpetBost can set their fanspeed
-SUPPORT_ROOMBA_CARPET_BOOST = SUPPORT_IROBOT | SUPPORT_FAN_SPEED
+SUPPORT_ROOMBA_CARPET_BOOST = SUPPORT_IROBOT | VacuumEntityFeature.FAN_SPEED
class RoombaVacuum(IRobotVacuum):
| ## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
Use VacuumEntityFeature in roomba
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [x] Code quality improvements to existing code or addition of tests
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [ ] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [ ] There is no commented out code in this PR.
- [ ] I have followed the [development checklist][dev-checklist]
- [ ] The code has been formatted using Black (`black --fast homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.
The integration reached or maintains the following [Integration Quality Scale][quality-scale]:
<!--
The Integration Quality Scale scores an integration on the code quality
and user experience. Each level of the quality scale consists of a list
of requirements. We highly recommend getting your integration scored!
-->
- [ ] No score or internal
- [ ] 🥈 Silver
- [ ] 🥇 Gold
- [ ] 🏆 Platinum
<!--
This project is very active and we have a high turnover of pull requests.
Unfortunately, the number of incoming pull requests is higher than what our
reviewers can review and merge so there is a long backlog of pull requests
waiting for review. You can help here!
By reviewing another pull request, you will help raise the code quality of
that pull request and the final review will be faster. This way the general
pace of pull request reviews will go up and your wait time will go down.
When picking a pull request to review, try to choose one that hasn't yet
been reviewed.
Thanks for helping out!
-->
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
| https://api.github.com/repos/home-assistant/core/pulls/70577 | 2022-04-24T09:15:26Z | 2022-04-24T10:32:16Z | 2022-04-24T10:32:16Z | 2022-04-25T11:06:44Z | 809 | home-assistant/core | 39,332 |
`--single-cls` segments fix | diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index e107d1a2bcc..cc5f8843ef1 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -537,8 +537,6 @@ def __init__(self,
self.segments[i] = segment[j]
if single_cls: # single-class training, merge all classes into 0
self.labels[i][:, 0] = 0
- if segment:
- self.segments[i][:, 0] = 0
# Rectangular Training
if self.rect:
| May resolve #10230
Signed-off-by: Glenn Jocher <[email protected]>
<!--
Thank you for submitting a YOLOv5 🚀 Pull Request! We want to make contributing to YOLOv5 as easy and transparent as possible. A few tips to get you started:
- Search existing YOLOv5 [PRs](https://github.com/ultralytics/yolov5/pull) to see if a similar PR already exists.
- Link this PR to a YOLOv5 [issue](https://github.com/ultralytics/yolov5/issues) to help us understand what bug fix or feature is being implemented.
- Provide before and after profiling/inference/training results to help us quantify the improvement your PR provides (if applicable).
Please see our ✅ [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) for more details.
-->
## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>
### 🌟 Summary
Improved data loader logic for single-class training in YOLOv5.
### 📊 Key Changes
- Removed redundant lines of code that unnecessarily modified `segments` during single-class training.
### 🎯 Purpose & Impact
- The simplification of the data loader will result in cleaner code and potentially fewer bugs.
- Users performing single-class training will benefit from this change, although the impact on training performance or results should be minimal.
- 🚀 This contributes to the overall maintainability and readability of the YOLOv5 codebase. | https://api.github.com/repos/ultralytics/yolov5/pulls/10260 | 2022-11-22T19:24:44Z | 2022-11-22T19:30:14Z | 2022-11-22T19:30:14Z | 2024-01-19T04:11:47Z | 143 | ultralytics/yolov5 | 25,708 |
Added Optimization Algorithms | diff --git a/books.md b/books.md
index a9b9eb43..cab2362d 100644
--- a/books.md
+++ b/books.md
@@ -65,7 +65,8 @@ The following is a list of free and/or open source books on machine learning, st
- [Managing Machine Learning Projects: From design to deployment](https://www.manning.com/books/managing-machine-learning-projects) - Simon Thompson
- [Causal Machine Learning](https://www.manning.com/books/causal-machine-learning) - Robert Ness - Practical introduction to building AI models that can reason about causality.
- [Bayesian Optimization in Action](https://www.manning.com/books/bayesian-optimization-in-action) - Quan Nguyen - Book about building Bayesian optimization systems from the ground up.
-- - [Machine Learning Algorithms in Depth](https://www.manning.com/books/machine-learning-algorithms-in-depth)) - Vadim Smolyakov - Book about practical implementations of dozens of ML algorithms.
+- [Machine Learning Algorithms in Depth](https://www.manning.com/books/machine-learning-algorithms-in-depth)) - Vadim Smolyakov - Book about practical implementations of dozens of ML algorithms.
+- [Optimization Algorithms](https://www.manning.com/books/optimization-algorithms) - Alaa Khamis - Book about how to solve design, planning, and control problems using modern machine learning and AI techniques.
## Deep Learning
| Hi,
Stjepan from Manning here again. This time we have a title on optimization and search algorithms. Thank you for considering it.
Best, | https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/914 | 2023-02-01T11:34:30Z | 2023-03-03T14:12:10Z | 2023-03-03T14:12:09Z | 2023-03-03T16:22:20Z | 303 | josephmisiti/awesome-machine-learning | 52,013 |
REF: share string parsing code | diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 10bcf6c9eabbf..36001248d664b 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -51,7 +51,7 @@ from pandas._libs.tslibs.parsing import parse_datetime_string
from pandas._libs.tslibs.conversion cimport (
_TSObject,
cast_from_unit,
- convert_datetime_to_tsobject,
+ convert_str_to_tsobject,
convert_timezone,
get_datetime64_nanos,
parse_pydatetime,
@@ -482,7 +482,6 @@ cpdef array_to_datetime(
object val, tz
ndarray[int64_t] iresult
npy_datetimestruct dts
- NPY_DATETIMEUNIT out_bestunit
bint utc_convert = bool(utc)
bint seen_datetime_offset = False
bint is_raise = errors=="raise"
@@ -490,12 +489,8 @@ cpdef array_to_datetime(
bint is_coerce = errors=="coerce"
bint is_same_offsets
_TSObject _ts
- int64_t value
- int out_local = 0, out_tzoffset = 0
float tz_offset
set out_tzoffset_vals = set()
- bint string_to_dts_failed
- datetime py_dt
tzinfo tz_out = None
bint found_tz = False, found_naive = False
cnp.broadcast mi
@@ -557,61 +552,40 @@ cpdef array_to_datetime(
# GH#32264 np.str_ object
val = str(val)
- if len(val) == 0 or val in nat_strings:
- iresult[i] = NPY_NAT
+ if parse_today_now(val, &iresult[i], utc):
+ # We can't _quite_ dispatch this to convert_str_to_tsobject
+ # bc there isn't a nice way to pass "utc"
cnp.PyArray_MultiIter_NEXT(mi)
continue
- string_to_dts_failed = string_to_dts(
- val, &dts, &out_bestunit, &out_local,
- &out_tzoffset, False, None, False
+ _ts = convert_str_to_tsobject(
+ val, None, unit="ns", dayfirst=dayfirst, yearfirst=yearfirst
)
- if string_to_dts_failed:
- # An error at this point is a _parsing_ error
- # specifically _not_ OutOfBoundsDatetime
- if parse_today_now(val, &iresult[i], utc):
- cnp.PyArray_MultiIter_NEXT(mi)
- continue
-
- py_dt = parse_datetime_string(val,
- dayfirst=dayfirst,
- yearfirst=yearfirst)
- # If the dateutil parser returned tzinfo, capture it
- # to check if all arguments have the same tzinfo
- tz = py_dt.utcoffset()
-
- if tz is not None:
- seen_datetime_offset = True
- # dateutil timezone objects cannot be hashed, so
- # store the UTC offsets in seconds instead
- out_tzoffset_vals.add(tz.total_seconds())
- else:
- # Add a marker for naive string, to track if we are
- # parsing mixed naive and aware strings
- out_tzoffset_vals.add("naive")
-
- _ts = convert_datetime_to_tsobject(py_dt, None)
- iresult[i] = _ts.value
+ try:
+ _ts.ensure_reso(NPY_FR_ns)
+ except OutOfBoundsDatetime as err:
+ # re-raise with better exception message
+ raise OutOfBoundsDatetime(
+ f"Out of bounds nanosecond timestamp: {val}"
+ ) from err
+
+ iresult[i] = _ts.value
+
+ tz = _ts.tzinfo
+ if tz is not None:
+ # dateutil timezone objects cannot be hashed, so
+ # store the UTC offsets in seconds instead
+ nsecs = tz.utcoffset(None).total_seconds()
+ out_tzoffset_vals.add(nsecs)
+ # need to set seen_datetime_offset *after* the
+ # potentially-raising timezone(timedelta(...)) call,
+ # otherwise we can go down the is_same_offsets path
+ # bc len(out_tzoffset_vals) == 0
+ seen_datetime_offset = True
else:
- # No error reported by string_to_dts, pick back up
- # where we left off
- value = npy_datetimestruct_to_datetime(NPY_FR_ns, &dts)
- if out_local == 1:
- seen_datetime_offset = True
- # Store the out_tzoffset in seconds
- # since we store the total_seconds of
- # dateutil.tz.tzoffset objects
- out_tzoffset_vals.add(out_tzoffset * 60.)
- tz = timezone(timedelta(minutes=out_tzoffset))
- value = tz_localize_to_utc_single(value, tz)
- out_local = 0
- out_tzoffset = 0
- else:
- # Add a marker for naive string, to track if we are
- # parsing mixed naive and aware strings
- out_tzoffset_vals.add("naive")
- iresult[i] = value
- check_dts_bounds(&dts)
+ # Add a marker for naive string, to track if we are
+ # parsing mixed naive and aware strings
+ out_tzoffset_vals.add("naive")
else:
raise TypeError(f"{type(val)} is not convertible to datetime")
diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd
index 332ff1522ccf5..756ab67aa7084 100644
--- a/pandas/_libs/tslibs/conversion.pxd
+++ b/pandas/_libs/tslibs/conversion.pxd
@@ -35,6 +35,10 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, tzinfo tz,
int32_t nanos=*,
NPY_DATETIMEUNIT reso=*)
+cdef _TSObject convert_str_to_tsobject(str ts, tzinfo tz, str unit,
+ bint dayfirst=*,
+ bint yearfirst=*)
+
cdef int64_t get_datetime64_nanos(object val, NPY_DATETIMEUNIT reso) except? -1
cpdef datetime localize_pydatetime(datetime dt, tzinfo tz)
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 7cff269d2191e..aacb06fe36037 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -246,7 +246,7 @@ cdef _TSObject convert_to_tsobject(object ts, tzinfo tz, str unit,
obj = _TSObject()
if isinstance(ts, str):
- return _convert_str_to_tsobject(ts, tz, unit, dayfirst, yearfirst)
+ return convert_str_to_tsobject(ts, tz, unit, dayfirst, yearfirst)
if ts is None or ts is NaT:
obj.value = NPY_NAT
@@ -463,9 +463,9 @@ cdef _TSObject _create_tsobject_tz_using_offset(npy_datetimestruct dts,
return obj
-cdef _TSObject _convert_str_to_tsobject(str ts, tzinfo tz, str unit,
- bint dayfirst=False,
- bint yearfirst=False):
+cdef _TSObject convert_str_to_tsobject(str ts, tzinfo tz, str unit,
+ bint dayfirst=False,
+ bint yearfirst=False):
"""
Convert a string input `ts`, along with optional timezone object`tz`
to a _TSObject.
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index be05a649ec0b6..622f41236edb9 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -38,7 +38,10 @@ def test_dti_date(self):
@pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]])
def test_dti_date_out_of_range(self, data):
# GH#1475
- msg = "^Out of bounds nanosecond timestamp: 1400-01-01 00:00:00, at position 0$"
+ msg = (
+ "^Out of bounds nanosecond timestamp: "
+ "1400-01-01( 00:00:00)?, at position 0$"
+ )
with pytest.raises(OutOfBoundsDatetime, match=msg):
DatetimeIndex(data)
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py
index a6e40c30d5b82..bf0db0da1c3e3 100644
--- a/pandas/tests/tools/test_to_datetime.py
+++ b/pandas/tests/tools/test_to_datetime.py
@@ -2783,7 +2783,7 @@ def test_day_not_in_month_coerce(self, cache, arg, format, warning):
assert isna(to_datetime(arg, errors="coerce", format=format, cache=cache))
def test_day_not_in_month_raise(self, cache):
- msg = "day is out of range for month"
+ msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(
UserWarning, match="Could not infer format"
| - [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
| https://api.github.com/repos/pandas-dev/pandas/pulls/50736 | 2023-01-13T18:44:16Z | 2023-01-16T18:17:14Z | 2023-01-16T18:17:14Z | 2023-01-16T18:19:27Z | 2,239 | pandas-dev/pandas | 45,016 |
Typo in comment rabin_karp.py | diff --git a/strings/rabin_karp.py b/strings/rabin_karp.py
index 1fb145ec97fa..22da0de80f4c 100644
--- a/strings/rabin_karp.py
+++ b/strings/rabin_karp.py
@@ -40,7 +40,7 @@ def rabin_karp(pattern, text):
return True
if i == t_len - p_len:
continue
- # Calculating the ruling hash
+ # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
text_hash = (
(text_hash - ord(text[i]) * modulus_power) * alphabet_size
+ ord(text[i + p_len])
| fix: typo
### **Describe your change:**
* [X] Fix a bug or typo in an existing algorithm?
### **Checklist:**
* [X] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [X] This pull request is all my own work -- I have not plagiarized.
* [X] I know that pull requests will not be merged if they fail the automated tests.
* [X] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [X] All new Python files are placed inside an existing directory.
* [X] All filenames are in all lowercase characters with no spaces or dashes.
* [X] All functions and variable names follow Python naming conventions.
* [X] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [X] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [X] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [X] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| https://api.github.com/repos/TheAlgorithms/Python/pulls/1820 | 2020-03-29T03:32:21Z | 2020-03-29T08:19:19Z | 2020-03-29T08:19:19Z | 2020-03-29T08:20:50Z | 154 | TheAlgorithms/Python | 29,943 |
Release Certbot 2.6.0 | diff --git a/acme/docs/jws-help.txt b/acme/docs/jws-help.txt
index 34cf5ce2356..bfd16dff4d7 100644
--- a/acme/docs/jws-help.txt
+++ b/acme/docs/jws-help.txt
@@ -3,6 +3,6 @@ usage: jws [-h] [--compact] {sign,verify} ...
positional arguments:
{sign,verify}
-optional arguments:
+options:
-h, --help show this help message and exit
--compact
diff --git a/acme/setup.py b/acme/setup.py
index 904ab162f55..4296e4ad6df 100644
--- a/acme/setup.py
+++ b/acme/setup.py
@@ -3,7 +3,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'cryptography>=3.2.1',
diff --git a/certbot-apache/setup.py b/certbot-apache/setup.py
index 18f60bfeab9..d877eb668b4 100644
--- a/certbot-apache/setup.py
+++ b/certbot-apache/setup.py
@@ -1,7 +1,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
# We specify the minimum acme and certbot version as the current plugin
diff --git a/certbot-compatibility-test/setup.py b/certbot-compatibility-test/setup.py
index e7510b6cd58..291604986db 100644
--- a/certbot-compatibility-test/setup.py
+++ b/certbot-compatibility-test/setup.py
@@ -1,7 +1,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'certbot',
diff --git a/certbot-dns-cloudflare/setup.py b/certbot-dns-cloudflare/setup.py
index 124fc0c0802..5e388acd6b9 100644
--- a/certbot-dns-cloudflare/setup.py
+++ b/certbot-dns-cloudflare/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'cloudflare>=1.5.1',
diff --git a/certbot-dns-digitalocean/setup.py b/certbot-dns-digitalocean/setup.py
index c378ee1493b..b7659b4352c 100644
--- a/certbot-dns-digitalocean/setup.py
+++ b/certbot-dns-digitalocean/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'python-digitalocean>=1.11', # 1.15.0 or newer is recommended for TTL support
diff --git a/certbot-dns-dnsimple/setup.py b/certbot-dns-dnsimple/setup.py
index b2b74180abb..51ff8eac862 100644
--- a/certbot-dns-dnsimple/setup.py
+++ b/certbot-dns-dnsimple/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
# This version of lexicon is required to address the problem described in
diff --git a/certbot-dns-dnsmadeeasy/setup.py b/certbot-dns-dnsmadeeasy/setup.py
index 4959ccc19bf..d664587fc26 100644
--- a/certbot-dns-dnsmadeeasy/setup.py
+++ b/certbot-dns-dnsmadeeasy/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'dns-lexicon>=3.2.1',
diff --git a/certbot-dns-gehirn/setup.py b/certbot-dns-gehirn/setup.py
index 76beb15a184..77846326fd9 100644
--- a/certbot-dns-gehirn/setup.py
+++ b/certbot-dns-gehirn/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'dns-lexicon>=3.2.1',
diff --git a/certbot-dns-google/setup.py b/certbot-dns-google/setup.py
index 7649d48bf64..b74bba0b8a8 100644
--- a/certbot-dns-google/setup.py
+++ b/certbot-dns-google/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'google-api-python-client>=1.6.5',
diff --git a/certbot-dns-linode/setup.py b/certbot-dns-linode/setup.py
index e6690e93b31..40cb730eae4 100644
--- a/certbot-dns-linode/setup.py
+++ b/certbot-dns-linode/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'dns-lexicon>=3.2.1',
diff --git a/certbot-dns-luadns/setup.py b/certbot-dns-luadns/setup.py
index e61c6d31721..3be44321a42 100644
--- a/certbot-dns-luadns/setup.py
+++ b/certbot-dns-luadns/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'dns-lexicon>=3.2.1',
diff --git a/certbot-dns-nsone/setup.py b/certbot-dns-nsone/setup.py
index dd452de44ff..995b537cb28 100644
--- a/certbot-dns-nsone/setup.py
+++ b/certbot-dns-nsone/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'dns-lexicon>=3.2.1',
diff --git a/certbot-dns-ovh/setup.py b/certbot-dns-ovh/setup.py
index eea84d5df07..e0ea2a4ab35 100644
--- a/certbot-dns-ovh/setup.py
+++ b/certbot-dns-ovh/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'dns-lexicon>=3.2.1',
diff --git a/certbot-dns-rfc2136/setup.py b/certbot-dns-rfc2136/setup.py
index f1822f12828..bcd06efb6d4 100644
--- a/certbot-dns-rfc2136/setup.py
+++ b/certbot-dns-rfc2136/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'dnspython>=1.15.0',
diff --git a/certbot-dns-route53/setup.py b/certbot-dns-route53/setup.py
index 7a0097df053..0237a096970 100644
--- a/certbot-dns-route53/setup.py
+++ b/certbot-dns-route53/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'boto3>=1.15.15',
diff --git a/certbot-dns-sakuracloud/setup.py b/certbot-dns-sakuracloud/setup.py
index 65f5e8b2752..bbce635c4e2 100644
--- a/certbot-dns-sakuracloud/setup.py
+++ b/certbot-dns-sakuracloud/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
'dns-lexicon>=3.2.1',
diff --git a/certbot-nginx/setup.py b/certbot-nginx/setup.py
index 59be22298b3..e0dcd4f2259 100644
--- a/certbot-nginx/setup.py
+++ b/certbot-nginx/setup.py
@@ -1,7 +1,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.6.0.dev0'
+version = '2.7.0.dev0'
install_requires = [
# We specify the minimum acme and certbot version as the current plugin
diff --git a/certbot/CHANGELOG.md b/certbot/CHANGELOG.md
index 3496e84d237..9f33167a359 100644
--- a/certbot/CHANGELOG.md
+++ b/certbot/CHANGELOG.md
@@ -2,7 +2,23 @@
Certbot adheres to [Semantic Versioning](https://semver.org/).
-## 2.6.0 - master
+## 2.7.0 - master
+
+### Added
+
+*
+
+### Changed
+
+*
+
+### Fixed
+
+*
+
+More details about these changes can be found on our GitHub repo.
+
+## 2.6.0 - 2023-05-09
### Added
diff --git a/certbot/certbot/__init__.py b/certbot/certbot/__init__.py
index 343efe7af5f..82ddadb759e 100644
--- a/certbot/certbot/__init__.py
+++ b/certbot/certbot/__init__.py
@@ -1,3 +1,3 @@
"""Certbot client."""
# version number like 1.2.3a0, must have at least 2 parts, like 1.2
-__version__ = '2.6.0.dev0'
+__version__ = '2.7.0.dev0'
diff --git a/certbot/docs/cli-help.txt b/certbot/docs/cli-help.txt
index c1d954b05b8..220362b9582 100644
--- a/certbot/docs/cli-help.txt
+++ b/certbot/docs/cli-help.txt
@@ -36,7 +36,7 @@ manage your account:
--agree-tos Agree to the ACME server's Subscriber Agreement
-m EMAIL Email address for important account notifications
-optional arguments:
+options:
-h, --help show this help message and exit
-c CONFIG_FILE, --config CONFIG_FILE
path to config file (default: /etc/letsencrypt/cli.ini
@@ -74,26 +74,24 @@ optional arguments:
HMAC key for External Account Binding (default: None)
--cert-name CERTNAME Certificate name to apply. This name is used by
Certbot for housekeeping and in file paths; it doesn't
- affect the content of the certificate itself. To see
+ affect the content of the certificate itself.
+ Certificate name cannot contain filepath separators
+ (i.e. '/' or '\', depending on the platform). To see
certificate names, run 'certbot certificates'. When
creating a new certificate, specifies the new
certificate's name. (default: the first provided
domain or the name of an existing certificate on your
system for the same domains)
- --dry-run Perform a test run of the client, obtaining test
- (invalid) certificates but not saving them to disk.
- This can currently only be used with the 'certonly'
- and 'renew' subcommands. Note: Although --dry-run
- tries to avoid making any persistent changes on a
- system, it is not completely side-effect free: if used
- with webserver authenticator plugins like apache and
- nginx, it makes and then reverts temporary config
- changes in order to obtain test certificates, and
- reloads webservers to deploy and then roll back those
- changes. It also calls --pre-hook and --post-hook
- commands if they are defined because they may be
- necessary to accurately simulate renewal. --deploy-
- hook commands are not called. (default: False)
+ --dry-run Perform a test run against the Let's Encrypt staging
+ server, obtaining test (invalid) certificates but not
+ saving them to disk. This can only be used with the
+ 'certonly' and 'renew' subcommands. It may trigger
+ webserver reloads to temporarily modify & roll back
+ configuration files. --pre-hook and --post-hook
+ commands run by default. --deploy-hook commands do not
+ run, unless enabled by --run-deploy-hooks. The test
+ server may be overridden with --server. (default:
+ False)
--debug-challenges After setting up challenges, wait for user input
before submitting to CA. When used in combination with
the `-v` option, the challenge URLs or FQDNs and their
@@ -124,7 +122,7 @@ optional arguments:
case, and to know when to deprecate support for past
Python versions and flags. If you wish to hide this
information from the Let's Encrypt server, set this to
- "". (default: CertbotACMEClient/2.5.0 (certbot;
+ "". (default: CertbotACMEClient/2.6.0 (certbot;
OS_NAME OS_VERSION) Authenticator/XXX Installer/YYY
(SUBCOMMAND; flags: FLAGS) Py/major.minor.patchlevel).
The flags encoded in the user agent are: --duplicate,
@@ -239,10 +237,11 @@ testing:
recommended when modifying the deploy hook using
`reconfigure`. (default: False)
--test-cert, --staging
- Use the staging server to obtain or revoke test
- (invalid) certificates; equivalent to --server
- https://acme-staging-v02.api.letsencrypt.org/directory
- (default: False)
+ Use the Let's Encrypt staging server to obtain or
+ revoke test (invalid) certificates; equivalent to
+ --server https://acme-
+ staging-v02.api.letsencrypt.org/directory (default:
+ False)
--debug Show tracebacks in case of errors (default: False)
--no-verify-ssl Disable verification of the ACME server's certificate.
The root certificates trusted by Certbot can be
@@ -603,13 +602,22 @@ dns-google:
before asking the ACME server to verify the DNS
record. (default: 60)
--dns-google-credentials DNS_GOOGLE_CREDENTIALS
- Path to Google Cloud DNS service account JSON file.
- (See https://developers.google.com/identity/protocols/
- OAuth2ServiceAccount#creatinganaccount forinformation
- about creating a service account and
- https://cloud.google.com/dns/access-
+ Path to Google Cloud DNS service account JSON file to
+ use instead of relying on Application Default
+ Credentials (ADC). (See https://cloud.google.com/docs/
+ authentication/application-default-credentials for
+ information about ADC, https://developers.google.com/i
+ dentity/protocols/OAuth2ServiceAccount#creatinganaccou
+ nt for information about creating a service account,
+ and https://cloud.google.com/dns/access-
control#permissions_and_roles for information about
- therequired permissions.) (default: None)
+ the permissions required to modify Cloud DNS records.)
+ (default: None)
+ --dns-google-project DNS_GOOGLE_PROJECT
+ The ID of the Google Cloud project that the Google
+ Cloud DNS managed zone(s) reside in. This will be
+ determined automatically if not specified. (default:
+ None)
dns-linode:
Obtain certificates using a DNS TXT record (if you are using Linode for
| Merge this PR. | https://api.github.com/repos/certbot/certbot/pulls/9692 | 2023-05-09T20:39:28Z | 2023-05-09T22:52:33Z | 2023-05-09T22:52:33Z | 2023-05-09T22:52:34Z | 3,987 | certbot/certbot | 2,568 |
Simplify identity initializer with zero padding | diff --git a/keras/initializers.py b/keras/initializers.py
index 6d51491a3d6..ab291c064de 100644
--- a/keras/initializers.py
+++ b/keras/initializers.py
@@ -283,20 +283,7 @@ def __call__(self, shape, dtype=None):
raise ValueError(
'Identity matrix initializer can only be used for 2D matrices.')
- if shape[0] == shape[1]:
- return self.gain * np.identity(shape[0])
- elif shape[0] > shape[1]:
- return self.gain * np.concatenate(
- [np.identity(shape[1]),
- np.zeros((shape[0] - shape[1], shape[1]))],
- axis=0
- )
- else:
- return self.gain * np.concatenate(
- [np.identity(shape[0]),
- np.zeros((shape[0], shape[1] - shape[0]))],
- axis=1
- )
+ return self.gain * np.eye(shape[0], shape[1])
def get_config(self):
return {
| ### Summary
This PR is a follow-up of #11887. The zero-padding for non-square identity matrices is the default behavior of `np.eye`.
### Related Issues
### PR Overview
- [ ] This PR requires new unit tests [y/n] (make sure tests are included)
- [ ] This PR requires to update the documentation [y/n] (make sure the docs are up-to-date)
- [ ] This PR is backwards compatible [y/n]
- [ ] This PR changes the current API [y/n] (all API changes need to be approved by fchollet)
| https://api.github.com/repos/keras-team/keras/pulls/11986 | 2019-01-07T04:26:49Z | 2019-01-09T23:57:19Z | 2019-01-09T23:57:19Z | 2019-01-10T00:22:11Z | 264 | keras-team/keras | 47,796 |
Xiami: update collect url; fix #502 | diff --git a/src/you_get/extractors/xiami.py b/src/you_get/extractors/xiami.py
index 143e6eb5be..4e0baec051 100644
--- a/src/you_get/extractors/xiami.py
+++ b/src/you_get/extractors/xiami.py
@@ -61,7 +61,7 @@ def xiami_download_song(sid, output_dir = '.', merge = True, info_only = False):
print_info(site_info, song_title, ext, size)
if not info_only:
- file_name = "%s - %s - %s" % (song_title, album_name, artist)
+ file_name = "%s - %s - %s" % (song_title, artist, album_name)
download_urls([url], file_name, ext, size, output_dir, merge = merge, faker = True)
try:
xiami_download_lyric(lrc_url, file_name, output_dir)
@@ -78,10 +78,16 @@ def xiami_download_showcollect(cid, output_dir = '.', merge = True, info_only =
tracks = doc.getElementsByTagName("track")
track_nr = 1
for i in tracks:
- artist = i.getElementsByTagName("artist")[0].firstChild.nodeValue
- album_name = i.getElementsByTagName("album_name")[0].firstChild.nodeValue
- song_title = i.getElementsByTagName("title")[0].firstChild.nodeValue
- url = location_dec(i.getElementsByTagName("location")[0].firstChild.nodeValue)
+ artist=album_name=song_title=url=""
+ try:
+ song_id = i.getElementsByTagName("song_id")[0].firstChild.nodeValue
+ artist = i.getElementsByTagName("artist")[0].firstChild.nodeValue
+ album_name = i.getElementsByTagName("album_name")[0].firstChild.nodeValue
+ song_title = i.getElementsByTagName("title")[0].firstChild.nodeValue
+ url = location_dec(i.getElementsByTagName("location")[0].firstChild.nodeValue)
+ except:
+ log.e("Song %s failed. [Info Missing] artist:%s, album:%s, title:%s, url:%s" % (song_id, artist, album_name, song_title, url))
+ continue
try:
lrc_url = i.getElementsByTagName("lyric")[0].firstChild.nodeValue
except:
@@ -142,8 +148,8 @@ def xiami_download(url, output_dir = '.', stream_type = None, merge = True, info
id = r1(r'http://www.xiami.com/album/(\d+)', url)
xiami_download_album(id, output_dir, merge, info_only)
- if re.match(r'http://www.xiami.com/song/showcollect/id/\d+', url):
- id = r1(r'http://www.xiami.com/song/showcollect/id/(\d+)', url)
+ if re.match(r'http://www.xiami.com/collect/\d+', url):
+ id = r1(r'http://www.xiami.com/collect/(\d+)', url)
xiami_download_showcollect(id, output_dir, merge, info_only)
if re.match('http://www.xiami.com/song/\d+', url):
| about b61e995f9228ef629e51bd4be4225ea473699dcf
update showcollect url as described in issue #502
<!-- Reviewable:start -->
[<img src="https://reviewable.io/review_button.png" height=40 alt="Review on Reviewable"/>](https://reviewable.io/reviews/soimort/you-get/503)
<!-- Reviewable:end -->
| https://api.github.com/repos/soimort/you-get/pulls/503 | 2015-03-09T05:54:58Z | 2015-04-02T02:33:25Z | 2015-04-02T02:33:25Z | 2015-04-02T02:33:59Z | 702 | soimort/you-get | 21,394 |
CI: fix conda version | diff --git a/ci/install_travis.sh b/ci/install_travis.sh
index 272e7f2e05d14..c92da8d4774e1 100755
--- a/ci/install_travis.sh
+++ b/ci/install_travis.sh
@@ -48,7 +48,12 @@ echo
echo "[update conda]"
conda config --set ssl_verify false || exit 1
conda config --set quiet true --set always_yes true --set changeps1 false || exit 1
-conda update -q conda
+
+# TODO(jreback), fix conoda version
+echo
+echo "[conda version]"
+conda install conda=4.4.4
+# conda update -q conda
if [ "$CONDA_BUILD_TEST" ]; then
echo
| https://api.github.com/repos/pandas-dev/pandas/pulls/19025 | 2018-01-01T13:43:53Z | 2018-01-01T14:15:38Z | 2018-01-01T14:15:38Z | 2018-01-01T14:15:38Z | 180 | pandas-dev/pandas | 45,560 |
|
acme: v4 DNS challenge | diff --git a/acme/acme/challenges.py b/acme/acme/challenges.py
index a2235b61ee5..13186cc4f8a 100644
--- a/acme/acme/challenges.py
+++ b/acme/acme/challenges.py
@@ -514,10 +514,100 @@ class DNS(DVChallenge):
"""
typ = "dns"
- token = jose.Field("token")
+
+ LABEL = "_acme-challenge"
+ """Label clients prepend to the domain name being validated."""
+
+ TOKEN_SIZE = 128 / 8 # Based on the entropy value from the spec
+ """Minimum size of the :attr:`token` in bytes."""
+
+ token = jose.Field(
+ "token", encoder=jose.encode_b64jose, decoder=functools.partial(
+ jose.decode_b64jose, size=TOKEN_SIZE, minimum=True))
+
+ def gen_validation(self, account_key, alg=jose.RS256, **kwargs):
+ """Generate validation.
+
+ :param .JWK account_key: Private account key.
+ :param .JWA alg:
+
+ :returns: This challenge wrapped in `.JWS`
+ :rtype: .JWS
+
+ """
+ return jose.JWS.sign(
+ payload=self.json_dumps(sort_keys=True).encode('utf-8'),
+ key=account_key, alg=alg, **kwargs)
+
+ def check_validation(self, validation, account_public_key):
+ """Check validation.
+
+ :param validation
+ :type account_public_key:
+ `~cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey`
+ or
+ `~cryptography.hazmat.primitives.asymmetric.dsa.DSAPublicKey`
+ or
+ `~cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey`
+ wrapped in `.ComparableKey`
+
+ :rtype: bool
+
+ """
+ if not validation.verify(key=account_public_key):
+ return False
+ try:
+ return self == self.json_loads(
+ validation.payload.decode('utf-8'))
+ except jose.DeserializationError as error:
+ logger.debug("Checking validation for DNS failed: %s", error)
+ return False
+
+ def gen_response(self, account_key, **kwargs):
+ """Generate response.
+
+ :param .JWK account_key: Private account key.
+ :param .JWA alg:
+
+ :rtype: DNSResponse
+
+ """
+ return DNSResponse(validation=self.gen_validation(
+ self, account_key, **kwargs))
+
+ def validation_domain_name(self, name):
+ """Domain name for TXT validation record.
+
+ :param unicode name: Domain name being validated.
+
+ """
+ return "{0}.{1}".format(self.LABEL, name)
@ChallengeResponse.register
class DNSResponse(ChallengeResponse):
- """ACME "dns" challenge response."""
+ """ACME "dns" challenge response.
+
+ :param JWS validation:
+
+ """
typ = "dns"
+
+ validation = jose.Field("validation", decoder=jose.JWS.from_json)
+
+ def check_validation(self, chall, account_public_key):
+ """Check validation.
+
+ :param challenges.DNS chall:
+ :type account_public_key:
+ `~cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey`
+ or
+ `~cryptography.hazmat.primitives.asymmetric.dsa.DSAPublicKey`
+ or
+ `~cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey`
+ wrapped in `.ComparableKey`
+
+ :rtype: bool
+
+ """
+ return chall.check_validation(self.validation, account_public_key)
diff --git a/acme/acme/challenges_test.py b/acme/acme/challenges_test.py
index d123eca20c8..06f5dffe149 100644
--- a/acme/acme/challenges_test.py
+++ b/acme/acme/challenges_test.py
@@ -570,9 +570,15 @@ def test_from_json_hashable(self):
class DNSTest(unittest.TestCase):
def setUp(self):
+ self.account_key = jose.JWKRSA.load(
+ test_util.load_vector('rsa512_key.pem'))
from acme.challenges import DNS
- self.msg = DNS(token='17817c66b60ce2e4012dfad92657527a')
- self.jmsg = {'type': 'dns', 'token': '17817c66b60ce2e4012dfad92657527a'}
+ self.msg = DNS(token=jose.b64decode(
+ b'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA'))
+ self.jmsg = {
+ 'type': 'dns',
+ 'token': 'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA',
+ }
def test_to_partial_json(self):
self.assertEqual(self.jmsg, self.msg.to_partial_json())
@@ -585,27 +591,84 @@ def test_from_json_hashable(self):
from acme.challenges import DNS
hash(DNS.from_json(self.jmsg))
+ def test_gen_check_validation(self):
+ self.assertTrue(self.msg.check_validation(
+ self.msg.gen_validation(self.account_key),
+ self.account_key.public_key()))
+
+ def test_gen_check_validation_wrong_key(self):
+ key2 = jose.JWKRSA.load(test_util.load_vector('rsa1024_key.pem'))
+ self.assertFalse(self.msg.check_validation(
+ self.msg.gen_validation(self.account_key), key2.public_key()))
+
+ def test_check_validation_wrong_payload(self):
+ validations = tuple(
+ jose.JWS.sign(payload=payload, alg=jose.RS256, key=self.account_key)
+ for payload in (b'', b'{}')
+ )
+ for validation in validations:
+ self.assertFalse(self.msg.check_validation(
+ validation, self.account_key.public_key()))
+
+ def test_check_validation_wrong_fields(self):
+ bad_validation = jose.JWS.sign(
+ payload=self.msg.update(token=b'x' * 20).json_dumps().encode('utf-8'),
+ alg=jose.RS256, key=self.account_key)
+ self.assertFalse(self.msg.check_validation(
+ bad_validation, self.account_key.public_key()))
+
+ def test_gen_response(self):
+ with mock.patch('acme.challenges.DNS.gen_validation') as mock_gen:
+ mock_gen.return_value = mock.sentinel.validation
+ response = self.msg.gen_response(self.account_key)
+ from acme.challenges import DNSResponse
+ self.assertTrue(isinstance(response, DNSResponse))
+ self.assertEqual(response.validation, mock.sentinel.validation)
+
+ def test_validation_domain_name(self):
+ self.assertEqual(
+ '_acme-challenge.le.wtf', self.msg.validation_domain_name('le.wtf'))
+
class DNSResponseTest(unittest.TestCase):
def setUp(self):
+ self.key = jose.JWKRSA(key=KEY)
+
+ from acme.challenges import DNS
+ self.chall = DNS(token=jose.b64decode(
+ b"evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA"))
+ self.validation = jose.JWS.sign(
+ payload=self.chall.json_dumps(sort_keys=True).encode(),
+ key=self.key, alg=jose.RS256)
+
from acme.challenges import DNSResponse
- self.msg = DNSResponse()
- self.jmsg = {
+ self.msg = DNSResponse(validation=self.validation)
+ self.jmsg_to = {
'resource': 'challenge',
'type': 'dns',
+ 'validation': self.validation,
+ }
+ self.jmsg_from = {
+ 'resource': 'challenge',
+ 'type': 'dns',
+ 'validation': self.validation.to_json(),
}
def test_to_partial_json(self):
- self.assertEqual(self.jmsg, self.msg.to_partial_json())
+ self.assertEqual(self.jmsg_to, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import DNSResponse
- self.assertEqual(self.msg, DNSResponse.from_json(self.jmsg))
+ self.assertEqual(self.msg, DNSResponse.from_json(self.jmsg_from))
def test_from_json_hashable(self):
from acme.challenges import DNSResponse
- hash(DNSResponse.from_json(self.jmsg))
+ hash(DNSResponse.from_json(self.jmsg_from))
+
+ def test_check_validation(self):
+ self.assertTrue(
+ self.msg.check_validation(self.chall, self.key.public_key()))
if __name__ == '__main__':
diff --git a/acme/acme/client_test.py b/acme/acme/client_test.py
index dcc0832e3db..ed0c6f65adb 100644
--- a/acme/acme/client_test.py
+++ b/acme/acme/client_test.py
@@ -55,7 +55,8 @@ def setUp(self):
authzr_uri = 'https://www.letsencrypt-demo.org/acme/authz/1'
challb = messages.ChallengeBody(
uri=(authzr_uri + '/1'), status=messages.STATUS_VALID,
- chall=challenges.DNS(token='foo'))
+ chall=challenges.DNS(token=jose.b64decode(
+ 'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA')))
self.challr = messages.ChallengeResource(
body=challb, authzr_uri=authzr_uri)
self.authz = messages.Authorization(
@@ -155,7 +156,7 @@ def test_answer_challenge(self):
self.response.links['up'] = {'url': self.challr.authzr_uri}
self.response.json.return_value = self.challr.body.to_json()
- chall_response = challenges.DNSResponse()
+ chall_response = challenges.DNSResponse(validation=None)
self.client.answer_challenge(self.challr.body, chall_response)
@@ -164,8 +165,9 @@ def test_answer_challenge(self):
self.challr.body.update(uri='foo'), chall_response)
def test_answer_challenge_missing_next(self):
- self.assertRaises(errors.ClientError, self.client.answer_challenge,
- self.challr.body, challenges.DNSResponse())
+ self.assertRaises(
+ errors.ClientError, self.client.answer_challenge,
+ self.challr.body, challenges.DNSResponse(validation=None))
def test_retry_after_date(self):
self.response.headers['Retry-After'] = 'Fri, 31 Dec 1999 23:59:59 GMT'
diff --git a/acme/acme/messages_test.py b/acme/acme/messages_test.py
index 481c2e2a32b..608ada2c27d 100644
--- a/acme/acme/messages_test.py
+++ b/acme/acme/messages_test.py
@@ -185,7 +185,8 @@ class ChallengeBodyTest(unittest.TestCase):
"""Tests for acme.messages.ChallengeBody."""
def setUp(self):
- self.chall = challenges.DNS(token='foo')
+ self.chall = challenges.DNS(token=jose.b64decode(
+ 'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA'))
from acme.messages import ChallengeBody
from acme.messages import Error
@@ -201,7 +202,7 @@ def setUp(self):
'uri': 'http://challb',
'status': self.status,
'type': 'dns',
- 'token': 'foo',
+ 'token': 'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA',
'error': error,
}
self.jobj_from = self.jobj_to.copy()
@@ -224,7 +225,8 @@ def test_from_json_hashable(self):
hash(ChallengeBody.from_json(self.jobj_from))
def test_proxy(self):
- self.assertEqual('foo', self.challb.token)
+ self.assertEqual(jose.b64decode(
+ 'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA'), self.challb.token)
class AuthorizationTest(unittest.TestCase):
| https://api.github.com/repos/certbot/certbot/pulls/753 | 2015-09-06T12:09:10Z | 2015-09-14T20:21:38Z | 2015-09-14T20:21:38Z | 2016-05-06T19:21:33Z | 2,810 | certbot/certbot | 914 |
|
extractor: youtube: Set extension of AAC audio formats to m4a. | diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index fb7c4283078..96ead331042 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -236,11 +236,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
'136': 'mp4',
'137': 'mp4',
'138': 'mp4',
- '139': 'mp4',
- '140': 'mp4',
- '141': 'mp4',
'160': 'mp4',
+ # Dash mp4 audio
+ '139': 'm4a',
+ '140': 'm4a',
+ '141': 'm4a',
+
# Dash webm
'171': 'webm',
'172': 'webm',
| This, in particular, eases downloading both audio and videos in DASH formats
before muxing them, which alleviates the problem that I exposed on issue
Furthermore, one may argue that this is, indeed, the case for correctness's
sake.
Signed-off-by: Rogério Brito [email protected]
| https://api.github.com/repos/ytdl-org/youtube-dl/pulls/1622 | 2013-10-18T20:54:29Z | 2013-10-22T22:16:26Z | 2013-10-22T22:16:26Z | 2015-09-20T18:15:34Z | 210 | ytdl-org/youtube-dl | 50,220 |
A module to create and delete boundary.com meters | diff --git a/library/monitoring/boundary_meter b/library/monitoring/boundary_meter
new file mode 100644
index 00000000000000..71ea2b548c4989
--- /dev/null
+++ b/library/monitoring/boundary_meter
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to add boundary meters.
+
+(c) 2013, curtis <[email protected]>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import json
+import datetime
+import urllib2
+import base64
+import os
+
+DOCUMENTATION = '''
+
+module: boundary_meter
+short_description: Manage boundary meters
+description:
+ - This module manages boundary meters
+version_added: "1.3"
+author: [email protected]
+requirements:
+ - Boundary API access
+ - bprobe is required to send data, but not to register a meter
+ - Python urllib2
+options:
+ name:
+ description:
+ - meter name
+ required: true
+ state:
+ description:
+ - Whether to create or remove the client from boundary
+ required: false
+ default: true
+ choices: ["present", "absent"]
+ apiid:
+ description:
+ - Organizations boundary API ID
+ required: true
+ apikey:
+ description:
+ - Organizations boundary API KEY
+ required: true
+
+notes:
+ - This module does not yet support boundary tags.
+
+'''
+
+EXAMPLES='''
+- name: Create meter
+ boundary_meter: apiid=AAAAAA api_key=BBBBBB state=present name={{ inventory_hostname }}"
+
+- name: Delete meter
+ boundary_meter: apiid=AAAAAA api_key=BBBBBB state=absent name={{ inventory_hostname }}"
+
+'''
+
+try:
+ import urllib2
+ HAS_URLLIB2 = True
+except ImportError:
+ HAS_URLLIB2 = False
+
+api_host = "api.boundary.com"
+config_directory = "/etc/bprobe"
+
+# "resource" like thing or apikey?
+def auth_encode(apikey):
+ auth = base64.standard_b64encode(apikey)
+ auth.replace("\n", "")
+ return auth
+
+def build_url(name, apiid, action, meter_id=None, cert_type=None):
+ if action == "create":
+ return 'https://%s/%s/meters' % (api_host, apiid)
+ elif action == "search":
+ return "https://%s/%s/meters?name=%s" % (api_host, apiid, name)
+ elif action == "certificates":
+ return "https://%s/%s/meters/%s/%s.pem" % (api_host, apiid, meter_id, cert_type)
+ elif action == "tags":
+ return "https://%s/%s/meters/%s/tags" % (api_host, apiid, meter_id)
+ elif action == "delete":
+ return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id)
+
+def http_request(name, apiid, apikey, action, meter_id=None, cert_type=None):
+
+ if meter_id is None:
+ url = build_url(name, apiid, action)
+ else:
+ if cert_type is None:
+ url = build_url(name, apiid, action, meter_id)
+ else:
+ url = build_url(name, apiid, action, meter_id, cert_type)
+
+ auth = auth_encode(apikey)
+ request = urllib2.Request(url)
+ request.add_header("Authorization", "Basic %s" % (auth))
+ request.add_header("Content-Type", "application/json")
+ return request
+
+def create_meter(module, name, apiid, apikey):
+
+ meters = search_meter(module, name, apiid, apikey)
+
+ if len(meters) > 0:
+ # If the meter already exists, do nothing
+ module.exit_json(status="Meter " + name + " already exists",changed=False)
+ else:
+ # If it doesn't exist, create it
+ request = http_request(name, apiid, apikey, action="create")
+ # A create request seems to need a json body with the name of the meter in it
+ body = '{"name":"' + name + '"}'
+ request.add_data(body)
+
+ try:
+ result = urllib2.urlopen(request)
+ except urllib2.URLError, e:
+ module.fail_json(msg="Failed to connect to api host to create meter")
+
+ # If the config directory doesn't exist, create it
+ if not os.path.exists(config_directory):
+ try:
+ os.makedirs(config_directory)
+ except:
+ module.fail_json("Could not create " + config_directory)
+
+
+ # Download both cert files from the api host
+ types = ['key', 'cert']
+ for cert_type in types:
+ try:
+ # If we can't open the file it's not there, so we should download it
+ cert_file = open('%s/%s.pem' % (config_directory,cert_type))
+ except IOError:
+ # Now download the file...
+ rc = download_request(module, name, apiid, apikey, cert_type)
+ if rc == False:
+ module.fail_json("Download request for " + cert_type + ".pem failed")
+
+ return 0, "Meter " + name + " created"
+
+def search_meter(module, name, apiid, apikey):
+
+ request = http_request(name, apiid, apikey, action="search")
+
+ try:
+ result = urllib2.urlopen(request)
+ except urllib2.URLError, e:
+ module.fail_json("Failed to connect to api host to search for meter")
+
+ # Return meters
+ return json.loads(result.read())
+
+def get_meter_id(module, name, apiid, apikey):
+ # In order to delete the meter we need its id
+ meters = search_meter(module, name, apiid, apikey)
+
+ if len(meters) > 0:
+ return meters[0]['id']
+ else:
+ return None
+
+def delete_meter(module, name, apiid, apikey):
+
+ meter_id = get_meter_id(module, name, apiid, apikey)
+
+ if meter_id is None:
+ return 1, "Meter does not exist, so can't delete it"
+ else:
+ action = "delete"
+ request = http_request(name, apiid, apikey, action, meter_id)
+ # See http://stackoverflow.com/questions/4511598/how-to-make-http-delete-method-using-urllib2
+ # urllib2 only does GET or POST I believe, but here we need delete
+ request.get_method = lambda: 'DELETE'
+
+ try:
+ result = urllib2.urlopen(request)
+ except urllib2.URLError, e:
+ module.fail_json("Failed to connect to api host to delete meter")
+
+ # Each new meter gets a new key.pem and ca.pem file, so they should be deleted
+ types = ['cert', 'key']
+ for cert_type in types:
+ try:
+ cert_file = '%s/%s.pem' % (config_directory,cert_type)
+ os.remove(cert_file)
+ except OSError, e: ## if failed, report it back to the user ##
+ module.fail_json("Failed to remove " + cert_type + ".pem file")
+
+ return 0, "Meter " + name + " deleted"
+
+def download_request(module, name, apiid, apikey, cert_type):
+
+ meter_id = get_meter_id(module, name, apiid, apikey)
+
+ if meter_id is not None:
+ action = "certificates"
+ request = http_request(name, apiid, apikey, action, meter_id, cert_type)
+
+ try:
+ result = urllib2.urlopen(request)
+ except urllib2.URLError, e:
+ module.fail_json("Failed to connect to api host to download certificate")
+
+ if result:
+ try:
+ cert_file_path = '%s/%s.pem' % (config_directory,cert_type)
+ body = result.read()
+ cert_file = open(cert_file_path, 'w')
+ cert_file.write(body)
+ cert_file.close
+ os.chmod(cert_file_path, 0o600)
+ except:
+ module.fail_json("Could not write to certificate file")
+
+ return True
+ else:
+ module.fail_json("Could not get meter id")
+
+def main():
+
+ if not HAS_URLLIB2:
+ module.fail_json(msg="urllib2 is not installed")
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=False),
+ apikey=dict(required=True),
+ apiid=dict(required=True),
+ )
+ )
+
+ state = module.params['state']
+ name= module.params['name']
+ apikey = module.params['api_key']
+ apiid = module.params['api_id']
+
+ if state == "present":
+ (rc, result) = create_meter(module, name, apiid, apikey)
+
+ if state == "absent":
+ (rc, result) = delete_meter(module, name, apiid, apikey)
+
+ if rc != 0:
+ module.fail_json(msg=result)
+
+ module.exit_json(status=result,changed=True)
+
+# include magic from lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+main()
\ No newline at end of file
| This module allows the creation and deletion of meters.
It also brings down the required .pem files from boundary's API and installs them in /etc/bprobe. Much of this was created using the boundary puppet module as an example. Currently it does not support boundary tags.
This is my first pull request, so if there is anything wrong, and I'm sure there is even though I made sure to quadruple check, will be glad to fix it. Thanks.
| https://api.github.com/repos/ansible/ansible/pulls/3272 | 2013-06-19T22:05:32Z | 2013-08-03T18:20:52Z | 2013-08-03T18:20:52Z | 2019-04-24T18:33:49Z | 2,378 | ansible/ansible | 48,986 |
Added azure api version patch | diff --git a/README.md b/README.md
index 2e95b79bc..9c9363faa 100644
--- a/README.md
+++ b/README.md
@@ -377,7 +377,7 @@ We then stream the model's messages, code, and your system's outputs to the term
Thank you for your interest in contributing! We welcome involvement from the community.
-Please see our [Contributing Guidelines](CONTRIBUTING.md) for more details on how to get involved.
+Please see our [Contributing Guidelines](docs/CONTRIBUTING.md) for more details on how to get involved.
# Roadmap
diff --git a/interpreter/core/llm/setup_openai_coding_llm.py b/interpreter/core/llm/setup_openai_coding_llm.py
index cbf61fc5e..b6e2dad1a 100644
--- a/interpreter/core/llm/setup_openai_coding_llm.py
+++ b/interpreter/core/llm/setup_openai_coding_llm.py
@@ -96,6 +96,8 @@ def coding_llm(messages):
params["api_base"] = interpreter.api_base
if interpreter.api_key:
params["api_key"] = interpreter.api_key
+ if interpreter.api_version:
+ params["api_version"] = interpreter.api_version
if interpreter.max_tokens:
params["max_tokens"] = interpreter.max_tokens
if interpreter.temperature is not None:
| ### Describe the changes you have made:
- Azure openai uses an "api version" parameter when running open interpreter. Thus, added this as a param in the setup file
- Fixed broken link to CONTRIBUTING.md in the README.md
### Reference any relevant issue (Fixes #000)
- [ ] I have performed a self-review of my code:
### I have tested the code on the following OS:
- [ ] Windows
- [ ] MacOS
- [ ] Linux
### AI Language Model (if applicable)
- [ ] GPT4
- [ ] GPT3
- [ ] Llama 7B
- [ ] Llama 13B
- [ ] Llama 34B
- [ ] Huggingface model (Please specify which one)
| https://api.github.com/repos/OpenInterpreter/open-interpreter/pulls/786 | 2023-11-23T09:55:58Z | 2023-11-26T02:13:10Z | 2023-11-26T02:13:10Z | 2023-11-26T19:32:02Z | 319 | OpenInterpreter/open-interpreter | 40,732 |
Update README.md | diff --git a/README.md b/README.md
index b4605454..00e2e0ce 100644
--- a/README.md
+++ b/README.md
@@ -85,7 +85,7 @@ wget https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1
**Inference!**
```bash
-python inference_gfpgan.py --upscale_factor 2 --test_path inputs/whole_imgs --save_root results
+python inference_gfpgan.py --upscale 2 --test_path inputs/whole_imgs --save_root results
```
## :european_castle: Model Zoo
| upscale_factor is not a recognized argument. | https://api.github.com/repos/TencentARC/GFPGAN/pulls/43 | 2021-08-17T12:13:14Z | 2021-08-18T00:06:37Z | 2021-08-18T00:06:36Z | 2021-08-18T06:17:11Z | 150 | TencentARC/GFPGAN | 10,926 |
Make DefaultDataCollator importable from root | diff --git a/docs/source/main_classes/data_collator.rst b/docs/source/main_classes/data_collator.rst
index 4232d05abcafc..4893ebf252664 100644
--- a/docs/source/main_classes/data_collator.rst
+++ b/docs/source/main_classes/data_collator.rst
@@ -29,6 +29,13 @@ Default data collator
.. autofunction:: transformers.data.data_collator.default_data_collator
+DefaultDataCollator
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.data.data_collator.DefaultDataCollator
+ :members:
+
+
DataCollatorWithPadding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py
index b60f1fe304e84..bd1383a360db5 100755
--- a/src/transformers/__init__.py
+++ b/src/transformers/__init__.py
@@ -92,6 +92,7 @@
"DataCollatorForTokenClassification",
"DataCollatorForWholeWordMask",
"DataCollatorWithPadding",
+ "DefaultDataCollator",
"default_data_collator",
],
"feature_extraction_sequence_utils": ["SequenceFeatureExtractor"],
@@ -2079,6 +2080,7 @@
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
+ DefaultDataCollator,
default_data_collator,
)
from .feature_extraction_sequence_utils import SequenceFeatureExtractor
diff --git a/src/transformers/data/__init__.py b/src/transformers/data/__init__.py
index bd78404c6682b..7ed4859dd420a 100644
--- a/src/transformers/data/__init__.py
+++ b/src/transformers/data/__init__.py
@@ -24,6 +24,7 @@
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
+ DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
diff --git a/src/transformers/data/data_collator.py b/src/transformers/data/data_collator.py
index 1ebd0a0b7ab4f..8b16280e3f713 100644
--- a/src/transformers/data/data_collator.py
+++ b/src/transformers/data/data_collator.py
@@ -72,6 +72,24 @@ def default_data_collator(features: List[InputDataClass], return_tensors="pt") -
@dataclass
class DefaultDataCollator(DataCollatorMixin):
+ """
+ Very simple data collator that simply collates batches of dict-like objects and performs special handling for
+ potential keys named:
+
+ - ``label``: handles a single value (int or float) per object
+ - ``label_ids``: handles a list of values per object
+
+ Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
+ to the model. See glue and ner for example of how it's useful.
+
+ This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
+ helpful if you need to set a return_tensors value at initialization.
+
+ Args:
+ return_tensors (:obj:`str`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+ """
+
return_tensors: str = "pt"
def __call__(self, features: List[Dict[str, Any]], return_tensors=None) -> Dict[str, Any]:
@@ -214,6 +232,8 @@ class DataCollatorWithPadding:
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
+ return_tensors (:obj:`str`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
@@ -266,6 +286,8 @@ class DataCollatorForTokenClassification(DataCollatorMixin):
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).
+ return_tensors (:obj:`str`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
@@ -519,6 +541,8 @@ class DataCollatorForSeq2Seq:
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
+ return_tensors (:obj:`str`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
@@ -591,6 +615,8 @@ class DataCollatorForLanguageModeling(DataCollatorMixin):
The probability with which to (randomly) mask tokens in the input, when :obj:`mlm` is set to :obj:`True`.
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
+ return_tensors (:obj:`str`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
.. note::
| This is a small PR to fix an oversight - the DefaultDataCollator class was not importable from root (this is separate from the `default_data_collator` function). It also adds some missing docstring arguments, and the missing docstring for DefaultDataCollator. | https://api.github.com/repos/huggingface/transformers/pulls/14588 | 2021-12-01T17:24:37Z | 2021-12-03T20:15:10Z | 2021-12-03T20:15:09Z | 2021-12-03T20:15:10Z | 1,266 | huggingface/transformers | 12,204 |
Add import LongLLMLinguaPostprocessor for LongLLMLingua.ipynb | diff --git a/docs/examples/node_postprocessor/LongLLMLingua.ipynb b/docs/examples/node_postprocessor/LongLLMLingua.ipynb
index e325a32b323c0..0f7f6076b5313 100644
--- a/docs/examples/node_postprocessor/LongLLMLingua.ipynb
+++ b/docs/examples/node_postprocessor/LongLLMLingua.ipynb
@@ -175,6 +175,7 @@
"source": [
"from llama_index.query_engine import RetrieverQueryEngine\n",
"from llama_index.response_synthesizers import CompactAndRefine\n",
+ "from llama_index.indices.postprocessor import LongLLMLinguaPostprocessor\n",
"\n",
"node_postprocessor = LongLLMLinguaPostprocessor(\n",
" instruction_str=\"Given the context, please answer the final question\",\n",
| # Description
Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change.
Fixes # (issue) **NameError: name 'LongLLMLinguaPostprocessor' is not defined**
## Type of Change
Please delete options that are not relevant.
- [ x] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update
# How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration
- [ ] Added new unit/integration tests
- [ ] Added new notebook (that tests end-to-end)
- [ ] I stared at the code and made sure it makes sense
# Suggested Checklist:
- [x] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [x] My changes generate no new warnings
- [x] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] I ran `make format; make lint` to appease the lint gods
| https://api.github.com/repos/run-llama/llama_index/pulls/8519 | 2023-10-27T04:55:16Z | 2023-10-27T07:03:38Z | 2023-10-27T07:03:38Z | 2023-10-27T07:39:57Z | 195 | run-llama/llama_index | 6,090 |
Feat/api gateway dynamodb query | diff --git a/localstack/services/apigateway/invocations.py b/localstack/services/apigateway/invocations.py
index 74d5c53b60f2f..7e557262e8fe7 100644
--- a/localstack/services/apigateway/invocations.py
+++ b/localstack/services/apigateway/invocations.py
@@ -481,6 +481,37 @@ def invoke_rest_api_integration_backend(invocation_context: ApiInvocationContext
table.put_item(Item=event_data)
response = requests_response(event_data)
return response
+ elif "Query" in action:
+ template = integration["requestTemplates"][APPLICATION_JSON]
+
+ if template is None:
+ msg = "No request template is defined in the integration."
+ LOG.info("%s Existing: %s", msg, response_templates)
+ return make_error_response(msg, 404)
+
+ response_template = response_templates.get(APPLICATION_JSON)
+
+ if response_template is None:
+ msg = "Invalid response template defined in integration response."
+ LOG.info("%s Existing: %s", msg, response_templates)
+ return make_error_response(msg, 404)
+
+ request_templates = RequestTemplates()
+ payload = request_templates.render(invocation_context)
+ payload = json.loads(payload)
+
+ dynamo_client = aws_stack.connect_to_resource("dynamodb")
+ table = dynamo_client.Table(table_name)
+ response = table.get_item(**payload)
+
+ if "Item" not in response:
+ msg = "Item not found in DynamoDB."
+ LOG.info("%s Existing: %s", msg, response_template)
+ return make_error_response(msg, 404)
+
+ response = requests_response(response["Item"])
+ invocation_context.response = response
+ return response
else:
raise Exception(
'API Gateway action uri "%s", integration type %s not yet implemented'
| Support API Gateway to Dynamodb intergration queries | https://api.github.com/repos/localstack/localstack/pulls/7572 | 2023-01-29T16:18:30Z | 2023-02-25T14:34:02Z | 2023-02-25T14:34:02Z | 2023-02-25T14:34:03Z | 417 | localstack/localstack | 28,730 |
Use path of socket consistently accross document. | diff --git a/docs/deploying/uwsgi.rst b/docs/deploying/uwsgi.rst
index 183bdb69fa..fc991e72be 100644
--- a/docs/deploying/uwsgi.rst
+++ b/docs/deploying/uwsgi.rst
@@ -29,7 +29,7 @@ Given a flask application in myapp.py, use the following command:
.. sourcecode:: text
- $ uwsgi -s /tmp/uwsgi.sock --manage-script-name --mount /yourapplication=myapp:app
+ $ uwsgi -s /tmp/yourapplication.sock --manage-script-name --mount /yourapplication=myapp:app
The ``--manage-script-name`` will move the handling of ``SCRIPT_NAME`` to uwsgi,
since its smarter about that. It is used together with the ``--mount`` directive
| Fixes #1975
| https://api.github.com/repos/pallets/flask/pulls/1976 | 2016-08-03T11:43:31Z | 2016-08-03T16:22:14Z | 2016-08-03T16:22:14Z | 2020-11-14T04:42:41Z | 188 | pallets/flask | 20,817 |
fix letv by new api | diff --git a/src/you_get/extractor/letv.py b/src/you_get/extractor/letv.py
index 54aa28b212..8b27b92457 100644
--- a/src/you_get/extractor/letv.py
+++ b/src/you_get/extractor/letv.py
@@ -21,23 +21,24 @@ def get_key(t):
t += e
return t ^ 185025305
+
def video_info(vid):
tn = get_timestamp()
key = get_key(tn)
- url = 'http://api.letv.com/mms/out/video/play?id={}&platid=1&splatid=101&format=1&tkey={}&domain=http%3A%2F%2Fwww.letv.com'.format(vid, key)
+ url="http://api.letv.com/mms/out/common/geturl?platid=3&splatid=301&playid=0&vtype=9,13,21,28&version=2.0&tss=no&vid={}&domain=www.letv.com&tkey={}".format(vid,key)
r = get_content(url, decoded=False)
- xml_obj = ET.fromstring(r)
- info = json.loads(xml_obj.find("playurl").text)
- title = info.get('title')
- urls = info.get('dispatch')
- for k in urls.keys():
- url = urls[k][0]
- break
- url += '&termid=1&format=0&hwtype=un&ostype=Windows7&tag=letv&sign=letv&expect=1&pay=0&rateid={}'.format(k)
- return url, title
-
-def letv_download_by_vid(vid, output_dir='.', merge=True, info_only=False):
- url, title = video_info(vid)
+ info=json.loads(str(r,"utf-8"))
+ size=0
+ for i in info["data"][0]["infos"]: #0 means only one file not truncated.need to upgrade
+ if int(i["gsize"])>size:
+ size=int(i["gsize"])
+ url=i["mainUrl"]
+
+ url += '&termid=1&format=0&hwtype=un&ostype=Windows7&tag=letv&sign=letv&expect=1&pay=0&rateid=1300' #{}'.format(k)
+ return url
+
+def letv_download_by_vid(vid,title, output_dir='.', merge=True, info_only=False):
+ url= video_info(vid)
_, _, size = url_info(url)
ext = 'flv'
print_info(site_info, title, ext, size)
@@ -45,12 +46,14 @@ def letv_download_by_vid(vid, output_dir='.', merge=True, info_only=False):
download_urls([url], title, ext, size, output_dir=output_dir, merge=merge)
def letv_download(url, output_dir='.', merge=True, info_only=False):
+ html = get_content(url)
+ #to get title
if re.match(r'http://www.letv.com/ptv/vplay/(\d+).html', url):
vid = match1(url, r'http://www.letv.com/ptv/vplay/(\d+).html')
else:
- html = get_content(url)
vid = match1(html, r'vid="(\d+)"')
- letv_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
+ title=match1(html,r'name="irTitle" content="(.*?)"')
+ letv_download_by_vid(vid,title, output_dir=output_dir, merge=merge, info_only=info_only)
site_info = "letv.com"
| refer to #355
Thanks to letv. Totallly hell of javascript. I hate javascript.
Besides,i have no idea whether the json from api contains serveral pieces of video.
TODO: add support for letv live streaming
| https://api.github.com/repos/soimort/you-get/pulls/361 | 2014-07-14T14:04:28Z | 2014-07-15T01:12:49Z | 2014-07-15T01:12:49Z | 2014-07-15T01:16:59Z | 857 | soimort/you-get | 21,203 |
[murrtube] Add new extractor | diff --git a/yt_dlp/extractor/extractors.py b/yt_dlp/extractor/extractors.py
index 194fe4be3b0..9dc54333a69 100644
--- a/yt_dlp/extractor/extractors.py
+++ b/yt_dlp/extractor/extractors.py
@@ -866,6 +866,7 @@
MTVItaliaProgrammaIE,
)
from .muenchentv import MuenchenTVIE
+from .murrtube import MurrtubeIE, MurrtubeUserIE
from .musescore import MuseScoreIE
from .mwave import MwaveIE, MwaveMeetGreetIE
from .mxplayer import (
diff --git a/yt_dlp/extractor/murrtube.py b/yt_dlp/extractor/murrtube.py
new file mode 100644
index 00000000000..1eb5de66063
--- /dev/null
+++ b/yt_dlp/extractor/murrtube.py
@@ -0,0 +1,165 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import functools
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ OnDemandPagedList,
+ determine_ext,
+ int_or_none,
+ try_get,
+)
+
+
+class MurrtubeIE(InfoExtractor):
+ _VALID_URL = r'''(?x)
+ (?:
+ murrtube:|
+ https?://murrtube\.net/videos/(?P<slug>[a-z0-9\-]+)\-
+ )
+ (?P<id>[a-f0-9]{8}\-[a-f0-9]{4}\-[a-f0-9]{4}\-[a-f0-9]{4}\-[a-f0-9]{12})
+ '''
+ _TEST = {
+ 'url': 'https://murrtube.net/videos/inferno-x-skyler-148b6f2a-fdcc-4902-affe-9c0f41aaaca0',
+ 'md5': '169f494812d9a90914b42978e73aa690',
+ 'info_dict': {
+ 'id': '148b6f2a-fdcc-4902-affe-9c0f41aaaca0',
+ 'ext': 'mp4',
+ 'title': 'Inferno X Skyler',
+ 'description': 'Humping a very good slutty sheppy (roomate)',
+ 'thumbnail': r're:^https?://.*\.jpg$',
+ 'duration': 284,
+ 'uploader': 'Inferno Wolf',
+ 'age_limit': 18,
+ 'comment_count': int,
+ 'view_count': int,
+ 'like_count': int,
+ 'tags': ['hump', 'breed', 'Fursuit', 'murrsuit', 'bareback'],
+ }
+ }
+
+ def _download_gql(self, video_id, op, note=None, fatal=True):
+ result = self._download_json(
+ 'https://murrtube.net/graphql',
+ video_id, note, data=json.dumps(op).encode(), fatal=fatal,
+ headers={'Content-Type': 'application/json'})
+ return result['data']
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ data = self._download_gql(video_id, {
+ 'operationName': 'Medium',
+ 'variables': {
+ 'id': video_id,
+ },
+ 'query': '''\
+query Medium($id: ID!) {
+ medium(id: $id) {
+ title
+ description
+ key
+ duration
+ commentsCount
+ likesCount
+ viewsCount
+ thumbnailKey
+ tagList
+ user {
+ name
+ __typename
+ }
+ __typename
+ }
+}'''})
+ meta = data['medium']
+
+ storage_url = 'https://storage.murrtube.net/murrtube/'
+ format_url = storage_url + meta.get('key', '')
+ thumbnail = storage_url + meta.get('thumbnailKey', '')
+
+ if determine_ext(format_url) == 'm3u8':
+ formats = self._extract_m3u8_formats(
+ format_url, video_id, 'mp4', entry_protocol='m3u8_native', fatal=False)
+ else:
+ formats = [{'url': format_url}]
+
+ return {
+ 'id': video_id,
+ 'title': meta.get('title'),
+ 'description': meta.get('description'),
+ 'formats': formats,
+ 'thumbnail': thumbnail,
+ 'duration': int_or_none(meta.get('duration')),
+ 'uploader': try_get(meta, lambda x: x['user']['name']),
+ 'view_count': meta.get('viewsCount'),
+ 'like_count': meta.get('likesCount'),
+ 'comment_count': meta.get('commentsCount'),
+ 'tags': meta.get('tagList'),
+ 'age_limit': 18,
+ }
+
+
+class MurrtubeUserIE(MurrtubeIE):
+ IE_DESC = 'Murrtube user profile'
+ _VALID_URL = r'https?://murrtube\.net/(?P<id>[^/]+)$'
+ _TEST = {
+ 'url': 'https://murrtube.net/stormy',
+ 'info_dict': {
+ 'id': 'stormy',
+ },
+ 'playlist_mincount': 27,
+ }
+ _PAGE_SIZE = 10
+
+ def _fetch_page(self, username, user_id, page):
+ data = self._download_gql(username, {
+ 'operationName': 'Media',
+ 'variables': {
+ 'limit': self._PAGE_SIZE,
+ 'offset': page * self._PAGE_SIZE,
+ 'sort': 'latest',
+ 'userId': user_id,
+ },
+ 'query': '''\
+query Media($q: String, $sort: String, $userId: ID, $offset: Int!, $limit: Int!) {
+ media(q: $q, sort: $sort, userId: $userId, offset: $offset, limit: $limit) {
+ id
+ __typename
+ }
+}'''},
+ 'Downloading page {0}'.format(page + 1))
+ if data is None:
+ raise ExtractorError(f'Failed to retrieve video list for page {page + 1}')
+
+ media = data['media']
+
+ for entry in media:
+ yield self.url_result('murrtube:{0}'.format(entry['id']), MurrtubeIE.ie_key())
+
+ def _real_extract(self, url):
+ username = self._match_id(url)
+ data = self._download_gql(username, {
+ 'operationName': 'User',
+ 'variables': {
+ 'id': username,
+ },
+ 'query': '''\
+query User($id: ID!) {
+ user(id: $id) {
+ id
+ __typename
+ }
+}'''},
+ 'Downloading user info')
+ if data is None:
+ raise ExtractorError('Failed to fetch user info')
+
+ user = data['user']
+
+ entries = OnDemandPagedList(functools.partial(
+ self._fetch_page, username, user.get('id')), self._PAGE_SIZE)
+
+ return self.playlist_result(entries, username)
| ## Please follow the guide below
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x])
- Use *Preview* tab to see how your *pull request* will actually look like
---
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Bug fix
- [ ] Improvement
- [x] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
Add support for (NSFW) [Murrtube](https://murrtube.net/), a site that hosts furry porn videos. | https://api.github.com/repos/yt-dlp/yt-dlp/pulls/2387 | 2022-01-19T05:26:17Z | 2022-02-15T11:10:16Z | 2022-02-15T11:10:16Z | 2022-02-15T11:10:16Z | 1,715 | yt-dlp/yt-dlp | 7,762 |
Find project root correctly | diff --git a/src/black/__init__.py b/src/black/__init__.py
index 2b2d3d88c73..d4c6e62bdbf 100644
--- a/src/black/__init__.py
+++ b/src/black/__init__.py
@@ -5825,8 +5825,8 @@ def gen_python_files(
def find_project_root(srcs: Iterable[str]) -> Path:
"""Return a directory containing .git, .hg, or pyproject.toml.
- That directory can be one of the directories passed in `srcs` or their
- common parent.
+ That directory will be a common parent of all files and directories
+ passed in `srcs`.
If no directory in the tree contains a marker that would specify it's the
project root, the root of the file system is returned.
@@ -5834,11 +5834,20 @@ def find_project_root(srcs: Iterable[str]) -> Path:
if not srcs:
return Path("/").resolve()
- common_base = min(Path(src).resolve() for src in srcs)
- if common_base.is_dir():
- # Append a fake file so `parents` below returns `common_base_dir`, too.
- common_base /= "fake-file"
- for directory in common_base.parents:
+ path_srcs = [Path(src).resolve() for src in srcs]
+
+ # A list of lists of parents for each 'src'. 'src' is included as a
+ # "parent" of itself if it is a directory
+ src_parents = [
+ list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs
+ ]
+
+ common_base = max(
+ set.intersection(*(set(parents) for parents in src_parents)),
+ key=lambda path: path.parts,
+ )
+
+ for directory in (common_base, *common_base.parents):
if (directory / ".git").exists():
return directory
diff --git a/tests/test_black.py b/tests/test_black.py
index 88839d86c5a..3ed5daa4b49 100644
--- a/tests/test_black.py
+++ b/tests/test_black.py
@@ -1801,6 +1801,28 @@ def __init__(self) -> None:
self.assertEqual(config["exclude"], r"\.pyi?$")
self.assertEqual(config["include"], r"\.py?$")
+ def test_find_project_root(self) -> None:
+ with TemporaryDirectory() as workspace:
+ root = Path(workspace)
+ test_dir = root / "test"
+ test_dir.mkdir()
+
+ src_dir = root / "src"
+ src_dir.mkdir()
+
+ root_pyproject = root / "pyproject.toml"
+ root_pyproject.touch()
+ src_pyproject = src_dir / "pyproject.toml"
+ src_pyproject.touch()
+ src_python = src_dir / "foo.py"
+ src_python.touch()
+
+ self.assertEqual(
+ black.find_project_root((src_dir, test_dir)), root.resolve()
+ )
+ self.assertEqual(black.find_project_root((src_dir,)), src_dir.resolve())
+ self.assertEqual(black.find_project_root((src_python,)), src_dir.resolve())
+
class BlackDTestCase(AioHTTPTestCase):
async def get_application(self) -> web.Application:
| This PR should fix the bug reported in #1493
I checked to make sure the unit test fails when the bugfix commit is removed. I tried to make sure I followed everything from the contribution guidelines, but please let me know if I missed anything. Thanks for taking the time to review this patch! | https://api.github.com/repos/psf/black/pulls/1518 | 2020-06-24T02:55:20Z | 2020-06-24T09:09:07Z | 2020-06-24T09:09:07Z | 2020-06-24T09:09:08Z | 747 | psf/black | 24,161 |
Fix typo in README | diff --git a/README.md b/README.md
index 1a20c65a2f..aa32356a79 100644
--- a/README.md
+++ b/README.md
@@ -89,7 +89,7 @@ Requests is ready for the demands of building robust and reliable HTTP–speak a
Requests Module Installation
----------------------------
-The recommended way to intall the `requests` module is to simply use [`pipenv`](https://pipenv.kennethreitz.org) (or `pip`, of
+The recommended way to install the `requests` module is to simply use [`pipenv`](https://pipenv.kennethreitz.org) (or `pip`, of
course):
```console
| Fix typo in README related to installation guidelines | https://api.github.com/repos/psf/requests/pulls/5468 | 2020-05-21T13:44:07Z | 2020-06-15T15:23:17Z | 2020-06-15T15:23:17Z | 2021-08-28T00:06:29Z | 159 | psf/requests | 32,622 |
ENH: meth 'DataFrame.to_pickle' and func 'read_pickle' to accept URL GH#30163 | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 5e39ca692746b..11a6f2628ac52 100755
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -220,6 +220,8 @@ Other enhancements
- The ``pandas.datetime`` class is now deprecated. Import from ``datetime`` instead (:issue:`30296`)
- Added an experimental :attr:`~DataFrame.attrs` for storing global metadata about a dataset (:issue:`29062`)
- :meth:`Timestamp.fromisocalendar` is now compatible with python 3.8 and above (:issue:`28115`)
+- :meth:`DataFrame.to_pickle` and :func:`read_pickle` now accept URL (:issue:`30163`)
+
Build Changes
^^^^^^^^^^^^^
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 2050a18cb48bf..0b81fb0f7a8d5 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -8,7 +8,7 @@
from shutil import rmtree
import string
import tempfile
-from typing import List, Optional, Union, cast
+from typing import Any, List, Optional, Union, cast
import warnings
import zipfile
@@ -22,7 +22,7 @@
)
import pandas._libs.testing as _testing
-from pandas._typing import FrameOrSeries
+from pandas._typing import FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
@@ -101,15 +101,17 @@ def reset_display_options():
pd.reset_option("^display.", silent=True)
-def round_trip_pickle(obj: FrameOrSeries, path: Optional[str] = None) -> FrameOrSeries:
+def round_trip_pickle(
+ obj: Any, path: Optional[FilePathOrBuffer] = None
+) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
- obj : pandas object
+ obj : any object
The object to pickle and then re-read.
- path : str, default None
+ path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
@@ -117,11 +119,12 @@ def round_trip_pickle(obj: FrameOrSeries, path: Optional[str] = None) -> FrameOr
pandas object
The original object that was pickled and then re-read.
"""
- if path is None:
- path = f"__{rands(10)}__.pickle"
- with ensure_clean(path) as path:
- pd.to_pickle(obj, path)
- return pd.read_pickle(path)
+ _path = path
+ if _path is None:
+ _path = f"__{rands(10)}__.pickle"
+ with ensure_clean(_path) as path:
+ pd.to_pickle(obj, _path)
+ return pd.read_pickle(_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 6ce52da21b4e8..e51f24b551f31 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -1,13 +1,20 @@
""" pickle compat """
import pickle
+from typing import Any, Optional
import warnings
+from pandas._typing import FilePathOrBuffer
from pandas.compat import pickle_compat as pc
-from pandas.io.common import get_handle, stringify_path
+from pandas.io.common import get_filepath_or_buffer, get_handle
-def to_pickle(obj, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL):
+def to_pickle(
+ obj: Any,
+ filepath_or_buffer: FilePathOrBuffer,
+ compression: Optional[str] = "infer",
+ protocol: int = pickle.HIGHEST_PROTOCOL,
+):
"""
Pickle (serialize) object to file.
@@ -15,11 +22,17 @@ def to_pickle(obj, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL):
----------
obj : any object
Any python object.
- path : str
- File path where the pickled object will be stored.
+ filepath_or_buffer : str, path object or file-like object
+ File path, URL, or buffer where the pickled object will be stored.
+
+ .. versionchanged:: 1.0.0
+ Accept URL. URL has to be of S3 or GCS.
+
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
- A string representing the compression to use in the output file. By
- default, infers from the file extension in specified path.
+ If 'infer' and 'path_or_url' is path-like, then detect compression from
+ the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
+ compression) If 'infer' and 'path_or_url' is not path-like, then use
+ None (= no decompression).
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
@@ -63,8 +76,12 @@ def to_pickle(obj, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL):
>>> import os
>>> os.remove("./dummy.pkl")
"""
- path = stringify_path(path)
- f, fh = get_handle(path, "wb", compression=compression, is_text=False)
+ fp_or_buf, _, compression, should_close = get_filepath_or_buffer(
+ filepath_or_buffer, compression=compression, mode="wb"
+ )
+ if not isinstance(fp_or_buf, str) and compression == "infer":
+ compression = None
+ f, fh = get_handle(fp_or_buf, "wb", compression=compression, is_text=False)
if protocol < 0:
protocol = pickle.HIGHEST_PROTOCOL
try:
@@ -73,9 +90,16 @@ def to_pickle(obj, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL):
f.close()
for _f in fh:
_f.close()
+ if should_close:
+ try:
+ fp_or_buf.close()
+ except ValueError:
+ pass
-def read_pickle(path, compression="infer"):
+def read_pickle(
+ filepath_or_buffer: FilePathOrBuffer, compression: Optional[str] = "infer"
+):
"""
Load pickled pandas object (or any object) from file.
@@ -86,13 +110,17 @@ def read_pickle(path, compression="infer"):
Parameters
----------
- path : str
- File path where the pickled object will be loaded.
+ filepath_or_buffer : str, path object or file-like object
+ File path, URL, or buffer where the pickled object will be loaded from.
+
+ .. versionchanged:: 1.0.0
+ Accept URL. URL is not limited to S3 and GCS.
+
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
- For on-the-fly decompression of on-disk data. If 'infer', then use
- gzip, bz2, xz or zip if path ends in '.gz', '.bz2', '.xz',
- or '.zip' respectively, and no decompression otherwise.
- Set to None for no decompression.
+ If 'infer' and 'path_or_url' is path-like, then detect compression from
+ the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
+ compression) If 'infer' and 'path_or_url' is not path-like, then use
+ None (= no decompression).
Returns
-------
@@ -134,8 +162,12 @@ def read_pickle(path, compression="infer"):
>>> import os
>>> os.remove("./dummy.pkl")
"""
- path = stringify_path(path)
- f, fh = get_handle(path, "rb", compression=compression, is_text=False)
+ fp_or_buf, _, compression, should_close = get_filepath_or_buffer(
+ filepath_or_buffer, compression=compression
+ )
+ if not isinstance(fp_or_buf, str) and compression == "infer":
+ compression = None
+ f, fh = get_handle(fp_or_buf, "rb", compression=compression, is_text=False)
# 1) try standard library Pickle
# 2) try pickle_compat (older pandas version) to handle subclass changes
@@ -159,3 +191,8 @@ def read_pickle(path, compression="infer"):
f.close()
for _f in fh:
_f.close()
+ if should_close:
+ try:
+ fp_or_buf.close()
+ except ValueError:
+ pass
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index ccd77f47b5e5e..3d427dde573af 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -22,6 +22,7 @@
import pytest
from pandas.compat import _get_lzma_file, _import_lzma, is_platform_little_endian
+import pandas.util._test_decorators as td
import pandas as pd
from pandas import Index
@@ -390,3 +391,99 @@ def test_unicode_decode_error(datapath):
# just test the columns are correct since the values are random
excols = pd.Index(["a", "b", "c"])
tm.assert_index_equal(df.columns, excols)
+
+
+# ---------------------
+# tests for buffer I/O
+# ---------------------
+
+
+def test_pickle_buffer_roundtrip():
+ with tm.ensure_clean() as path:
+ df = tm.makeDataFrame()
+ with open(path, "wb") as fh:
+ df.to_pickle(fh)
+ with open(path, "rb") as fh:
+ result = pd.read_pickle(fh)
+ tm.assert_frame_equal(df, result)
+
+
+# ---------------------
+# tests for URL I/O
+# ---------------------
+
+
[email protected](
+ "mockurl", ["http://url.com", "ftp://test.com", "http://gzip.com"]
+)
+def test_pickle_generalurl_read(monkeypatch, mockurl):
+ def python_pickler(obj, path):
+ with open(path, "wb") as fh:
+ pickle.dump(obj, fh, protocol=-1)
+
+ class MockReadResponse:
+ def __init__(self, path):
+ self.file = open(path, "rb")
+ if "gzip" in path:
+ self.headers = {"Content-Encoding": "gzip"}
+ else:
+ self.headers = {"Content-Encoding": None}
+
+ def read(self):
+ return self.file.read()
+
+ def close(self):
+ return self.file.close()
+
+ with tm.ensure_clean() as path:
+
+ def mock_urlopen_read(*args, **kwargs):
+ return MockReadResponse(path)
+
+ df = tm.makeDataFrame()
+ python_pickler(df, path)
+ monkeypatch.setattr("urllib.request.urlopen", mock_urlopen_read)
+ result = pd.read_pickle(mockurl)
+ tm.assert_frame_equal(df, result)
+
+
[email protected]_if_no("gcsfs")
[email protected]("mockurl", ["gs://gcs.com", "gcs://gcs.com"])
+def test_pickle_gcsurl_roundtrip(monkeypatch, mockurl):
+ with tm.ensure_clean() as path:
+
+ class MockGCSFileSystem:
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def open(self, *args):
+ mode = args[1] or None
+ f = open(path, mode)
+ return f
+
+ monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
+ df = tm.makeDataFrame()
+ df.to_pickle(mockurl)
+ result = pd.read_pickle(mockurl)
+ tm.assert_frame_equal(df, result)
+
+
[email protected]_if_no("s3fs")
[email protected]("mockurl", ["s3://s3.com", "s3n://s3.com", "s3a://s3.com"])
+def test_pickle_s3url_roundtrip(monkeypatch, mockurl):
+ with tm.ensure_clean() as path:
+
+ class MockS3FileSystem:
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def open(self, *args):
+ mode = args[1] or None
+ f = open(path, mode)
+ return f
+
+ monkeypatch.setattr("s3fs.S3FileSystem", MockS3FileSystem)
+ df = tm.makeDataFrame()
+ df.to_pickle(mockurl)
+ result = pd.read_pickle(mockurl)
+ tm.assert_frame_equal(df, result)
| - [ ] closes #30163
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/30301 | 2019-12-17T06:56:11Z | 2020-01-09T13:16:26Z | 2020-01-09T13:16:26Z | 2020-01-10T04:10:38Z | 2,976 | pandas-dev/pandas | 45,753 |
📝 Update docs for generating clients | diff --git a/docs/en/docs/advanced/generate-clients.md b/docs/en/docs/advanced/generate-clients.md
index 3fed48b0bcf93..f439ed93ab54b 100644
--- a/docs/en/docs/advanced/generate-clients.md
+++ b/docs/en/docs/advanced/generate-clients.md
@@ -12,10 +12,18 @@ A common tool is <a href="https://openapi-generator.tech/" class="external-link"
If you are building a **frontend**, a very interesting alternative is <a href="https://github.com/ferdikoomen/openapi-typescript-codegen" class="external-link" target="_blank">openapi-typescript-codegen</a>.
-Another option you could consider for several languages is <a href="https://www.buildwithfern.com/?utm_source=tiangolo&utm_medium=website&utm_campaign=docs-generate-clients" class="external-link" target="_blank">Fern</a>.
+## Client and SDK Generators - Sponsor
-!!! info
- <a href="https://www.buildwithfern.com/?utm_source=tiangolo&utm_medium=website&utm_campaign=docs-generate-clients" class="external-link" target="_blank">Fern</a> is also a FastAPI sponsor. 😎🎉
+There are also some **company-backed** Client and SDK generators based on OpenAPI (FastAPI), in some cases they can offer you **additional features** on top of high-quality generated SDKs/clients.
+
+Some of them also ✨ [**sponsor FastAPI**](../help-fastapi.md#sponsor-the-author){.internal-link target=_blank} ✨, this ensures the continued and healthy **development** of FastAPI and its **ecosystem**.
+
+And it shows their true commitment to FastAPI and its **community** (you), as they not only want to provide you a **good service** but also want to make sure you have a **good and healthy framework**, FastAPI. 🙇
+
+You might want to try their services and follow their guides:
+
+* <a href="https://www.buildwithfern.com/?utm_source=tiangolo&utm_medium=website&utm_campaign=docs-generate-clients" class="external-link" target="_blank">Fern</a>
+* <a href="https://speakeasyapi.dev/?utm_source=fastapi+repo&utm_medium=github+sponsorship" class="external-link" target="_blank">Speakeasy</a>
## Generate a TypeScript Frontend Client
| 📝 Update docs for generating clients | https://api.github.com/repos/tiangolo/fastapi/pulls/10112 | 2023-08-19T13:45:41Z | 2023-08-19T13:49:54Z | 2023-08-19T13:49:54Z | 2023-08-19T13:49:55Z | 554 | tiangolo/fastapi | 22,787 |
[MRG+1] Telnet console authentication | diff --git a/docs/topics/telnetconsole.rst b/docs/topics/telnetconsole.rst
index ce79c9f3535..bf2ffa44303 100644
--- a/docs/topics/telnetconsole.rst
+++ b/docs/topics/telnetconsole.rst
@@ -16,6 +16,17 @@ The telnet console is a :ref:`built-in Scrapy extension
disable it if you want. For more information about the extension itself see
:ref:`topics-extensions-ref-telnetconsole`.
+.. warning::
+ It is not secure to use telnet console via public networks, as telnet
+ doesn't provide any transport-layer security. Having username/password
+ authentication doesn't change that.
+
+ Intended usage is connecting to a running Scrapy spider locally
+ (spider process and telnet client are on the same machine)
+ or over a secure connection (VPN, SSH tunnel).
+ Please avoid using telnet console over insecure connections,
+ or disable it completely using :setting:`TELNETCONSOLE_ENABLED` option.
+
.. highlight:: none
How to access the telnet console
@@ -26,8 +37,26 @@ The telnet console listens in the TCP port defined in the
the console you need to type::
telnet localhost 6023
+ Trying localhost...
+ Connected to localhost.
+ Escape character is '^]'.
+ Username:
+ Password:
>>>
-
+
+By default Username is ``scrapy`` and Password is autogenerated. The
+autogenerated Password can be seen on scrapy logs like the example bellow::
+
+ 2018-10-16 14:35:21 [scrapy.extensions.telnet] INFO: Telnet Password: 16f92501e8a59326
+
+Default Username and Password can be overriden by the settings
+:setting:`TELNETCONSOLE_USERNAME` and :setting:`TELNETCONSOLE_PASSWORD`.
+
+.. warning::
+ Username and password provide only a limited protection, as telnet
+ is not using secure transport - by default traffic is not encrypted
+ even if username and password are set.
+
You need the telnet program which comes installed by default in Windows, and
most Linux distros.
@@ -160,3 +189,24 @@ Default: ``'127.0.0.1'``
The interface the telnet console should listen on
+
+.. setting:: TELNETCONSOLE_USERNAME
+
+TELNETCONSOLE_USERNAME
+----------------------
+
+Default: ``'scrapy'``
+
+The username used for the telnet console
+
+
+.. setting:: TELNETCONSOLE_PASSWORD
+
+TELNETCONSOLE_PASSWORD
+----------------------
+
+Default: ``None``
+
+The password used for the telnet console, default behaviour is to have it
+autogenerated
+
diff --git a/scrapy/extensions/telnet.py b/scrapy/extensions/telnet.py
index 3024ddfaa3b..dcf73eb8861 100644
--- a/scrapy/extensions/telnet.py
+++ b/scrapy/extensions/telnet.py
@@ -7,6 +7,8 @@
import pprint
import logging
import traceback
+import binascii
+import os
from twisted.internet import protocol
try:
@@ -22,6 +24,7 @@
from scrapy.utils.trackref import print_live_refs
from scrapy.utils.engine import print_engine_status
from scrapy.utils.reactor import listen_tcp
+from scrapy.utils.decorators import defers
try:
import guppy
@@ -49,6 +52,13 @@ def __init__(self, crawler):
self.noisy = False
self.portrange = [int(x) for x in crawler.settings.getlist('TELNETCONSOLE_PORT')]
self.host = crawler.settings['TELNETCONSOLE_HOST']
+ self.username = crawler.settings['TELNETCONSOLE_USERNAME']
+ self.password = crawler.settings['TELNETCONSOLE_PASSWORD']
+
+ if not self.password:
+ self.password = binascii.hexlify(os.urandom(8)).decode('utf8')
+ logger.info('Telnet Password: %s', self.password)
+
self.crawler.signals.connect(self.start_listening, signals.engine_started)
self.crawler.signals.connect(self.stop_listening, signals.engine_stopped)
@@ -67,9 +77,25 @@ def stop_listening(self):
self.port.stopListening()
def protocol(self):
- telnet_vars = self._get_telnet_vars()
- return telnet.TelnetTransport(telnet.TelnetBootstrapProtocol,
- insults.ServerProtocol, manhole.Manhole, telnet_vars)
+ class Portal:
+ """An implementation of IPortal"""
+ @defers
+ def login(self_, credentials, mind, *interfaces):
+ if not (credentials.username == self.username.encode('utf8') and
+ credentials.checkPassword(self.password.encode('utf8'))):
+ raise ValueError("Invalid credentials")
+
+ protocol = telnet.TelnetBootstrapProtocol(
+ insults.ServerProtocol,
+ manhole.Manhole,
+ self._get_telnet_vars()
+ )
+ return (interfaces[0], protocol, lambda: None)
+
+ return telnet.TelnetTransport(
+ telnet.AuthenticatingTelnetProtocol,
+ Portal()
+ )
def _get_telnet_vars(self):
# Note: if you add entries here also update topics/telnetconsole.rst
@@ -85,8 +111,8 @@ def _get_telnet_vars(self):
'p': pprint.pprint,
'prefs': print_live_refs,
'hpy': hpy,
- 'help': "This is Scrapy telnet console. For more info see: " \
- "https://doc.scrapy.org/en/latest/topics/telnetconsole.html",
+ 'help': "This is Scrapy telnet console. For more info see: "
+ "https://doc.scrapy.org/en/latest/topics/telnetconsole.html",
}
self.crawler.signals.send_catch_log(update_telnet_vars, telnet_vars=telnet_vars)
return telnet_vars
diff --git a/scrapy/settings/default_settings.py b/scrapy/settings/default_settings.py
index ca004aedd89..3734a0a5868 100644
--- a/scrapy/settings/default_settings.py
+++ b/scrapy/settings/default_settings.py
@@ -277,6 +277,8 @@
TELNETCONSOLE_ENABLED = 1
TELNETCONSOLE_PORT = [6023, 6073]
TELNETCONSOLE_HOST = '127.0.0.1'
+TELNETCONSOLE_USERNAME = 'scrapy'
+TELNETCONSOLE_PASSWORD = None
SPIDER_CONTRACTS = {}
SPIDER_CONTRACTS_BASE = {
diff --git a/tests/test_extension_telnet.py b/tests/test_extension_telnet.py
new file mode 100644
index 00000000000..4f389e5cb0f
--- /dev/null
+++ b/tests/test_extension_telnet.py
@@ -0,0 +1,60 @@
+try:
+ import unittest.mock as mock
+except ImportError:
+ import mock
+
+from twisted.trial import unittest
+from twisted.conch.telnet import ITelnetProtocol
+from twisted.cred import credentials
+from twisted.internet import defer
+
+from scrapy.extensions.telnet import TelnetConsole, logger
+from scrapy.utils.test import get_crawler
+
+
+class TelnetExtensionTest(unittest.TestCase):
+ def _get_console_and_portal(self, settings=None):
+ crawler = get_crawler(settings_dict=settings)
+ console = TelnetConsole(crawler)
+ username = console.username
+ password = console.password
+
+ # This function has some side effects we don't need for this test
+ console._get_telnet_vars = lambda: {}
+
+ console.start_listening()
+ protocol = console.protocol()
+ portal = protocol.protocolArgs[0]
+
+ return console, portal
+
+ @defer.inlineCallbacks
+ def test_bad_credentials(self):
+ console, portal = self._get_console_and_portal()
+ creds = credentials.UsernamePassword(b'username', b'password')
+ d = portal.login(creds, None, ITelnetProtocol)
+ yield self.assertFailure(d, ValueError)
+ console.stop_listening()
+
+ @defer.inlineCallbacks
+ def test_good_credentials(self):
+ console, portal = self._get_console_and_portal()
+ creds = credentials.UsernamePassword(
+ console.username.encode('utf8'),
+ console.password.encode('utf8')
+ )
+ d = portal.login(creds, None, ITelnetProtocol)
+ yield d
+ console.stop_listening()
+
+ @defer.inlineCallbacks
+ def test_custom_credentials(self):
+ settings = {
+ 'TELNETCONSOLE_USERNAME': 'user',
+ 'TELNETCONSOLE_PASSWORD': 'pass',
+ }
+ console, portal = self._get_console_and_portal(settings=settings)
+ creds = credentials.UsernamePassword(b'user', b'pass')
+ d = portal.login(creds, None, ITelnetProtocol)
+ yield d
+ console.stop_listening()
| This PR adds username/password authentication to Telnet console extension.
ToDo:
- [x] Implement username/password auth
- [x] Implement random password by default (?)
- [x] Add test case
- [x] Update docs | https://api.github.com/repos/scrapy/scrapy/pulls/3415 | 2018-09-05T13:50:40Z | 2018-12-26T13:58:44Z | 2018-12-26T13:58:44Z | 2018-12-26T13:58:48Z | 2,026 | scrapy/scrapy | 34,833 |
ref(analytics): remove resource card analytics | diff --git a/static/app/components/resourceCard.tsx b/static/app/components/resourceCard.tsx
index 5e55f9dd57cb3..8cc70711fb224 100644
--- a/static/app/components/resourceCard.tsx
+++ b/static/app/components/resourceCard.tsx
@@ -3,7 +3,6 @@ import styled from '@emotion/styled';
import Card from 'sentry/components/card';
import ExternalLink from 'sentry/components/links/externalLink';
import {space} from 'sentry/styles/space';
-import {analytics} from 'sentry/utils/analytics';
type Props = {
imgUrl: string;
@@ -14,10 +13,7 @@ type Props = {
function ResourceCard({title, link, imgUrl}: Props) {
return (
<Card interactive>
- <StyledLink
- href={link}
- onClick={() => analytics('orgdash.resource_clicked', {link, title})}
- >
+ <StyledLink href={link}>
<StyledImg src={imgUrl} alt={title} />
<StyledTitle>{title}</StyledTitle>
</StyledLink>
| Removing some BQ-only analytics because we weren't actually looking at this at all | https://api.github.com/repos/getsentry/sentry/pulls/47203 | 2023-04-11T22:58:27Z | 2023-04-12T15:04:14Z | 2023-04-12T15:04:14Z | 2023-04-28T00:23:55Z | 243 | getsentry/sentry | 44,226 |
Fixed test_relational_post_delete_signals_happen_before_parent_object when run in isolation. | diff --git a/tests/delete/tests.py b/tests/delete/tests.py
index 977294427c740..d03492bb6f131 100644
--- a/tests/delete/tests.py
+++ b/tests/delete/tests.py
@@ -422,9 +422,9 @@ def log_post_delete(instance, **kwargs):
self.assertIs(type(instance), S)
deletions.append(instance.id)
- r = R.objects.create(pk=1)
- S.objects.create(pk=1, r=r)
-
+ r = R.objects.create()
+ s = S.objects.create(r=r)
+ s_id = s.pk
models.signals.post_delete.connect(log_post_delete, sender=S)
try:
@@ -433,7 +433,7 @@ def log_post_delete(instance, **kwargs):
models.signals.post_delete.disconnect(log_post_delete)
self.assertEqual(len(deletions), 1)
- self.assertEqual(deletions[0], 1)
+ self.assertEqual(deletions[0], s_id)
@skipUnlessDBFeature("can_defer_constraint_checks")
def test_can_defer_constraint_checks(self):
| ```
$ ./runtests.py delete.tests.DeletionTests.test_relational_post_delete_signals_happen_before_parent_object
Testing against Django installed in '/django/django' with up to 8 processes
Found 1 test(s).
Creating test database for alias 'default'...
System check identified no issues (0 silenced).
E
======================================================================
ERROR: test_relational_post_delete_signals_happen_before_parent_object (delete.tests.DeletionTests)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/django/django/db/models/query.py", line 928, in get_or_create
return self.get(**kwargs), False
File "/django/django/db/models/query.py", line 650, in get
raise self.model.DoesNotExist(
delete.models.R.DoesNotExist: R matching query does not exist.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/django/django/db/backends/utils.py", line 89, in _execute
return self.cursor.execute(sql, params)
psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "delete_r_pkey"
DETAIL: Key (id)=(1) already exists.
``` | https://api.github.com/repos/django/django/pulls/16059 | 2022-09-16T07:13:14Z | 2022-09-16T09:14:28Z | 2022-09-16T09:14:28Z | 2022-09-16T09:14:43Z | 241 | django/django | 51,623 |
route53: tweak source organization to match other packages | diff --git a/certbot-route53/README.md b/certbot-route53/README.md
index cec9c295c29..582a0fb3564 100644
--- a/certbot-route53/README.md
+++ b/certbot-route53/README.md
@@ -24,7 +24,7 @@ for example).
Make sure you have access to AWS's Route53 service, either through IAM roles or
via `.aws/credentials`. Check out
-[sample-aws-policy.json](sample-aws-policy.json) for the necessary permissions.
+[sample-aws-policy.json](examples/sample-aws-policy.json) for the necessary permissions.
To generate a certificate:
```
diff --git a/certbot-route53/sample-aws-policy.json b/certbot-route53/examples/sample-aws-policy.json
similarity index 100%
rename from certbot-route53/sample-aws-policy.json
rename to certbot-route53/examples/sample-aws-policy.json
diff --git a/certbot-route53/tester.pkoch-macos_sierra.sh b/certbot-route53/tools/tester.pkoch-macos_sierra.sh
similarity index 100%
rename from certbot-route53/tester.pkoch-macos_sierra.sh
rename to certbot-route53/tools/tester.pkoch-macos_sierra.sh
| This change re-organizes some ancillary files to more closely match repository conventions.
Part of #4688. | https://api.github.com/repos/certbot/certbot/pulls/4729 | 2017-05-24T17:51:06Z | 2017-05-26T18:28:55Z | 2017-05-26T18:28:55Z | 2017-06-07T16:53:22Z | 281 | certbot/certbot | 748 |
Add a test case to torture.py | diff --git a/tests/data/torture.py b/tests/data/torture.py
index 7cabd4c163..2a194759a8 100644
--- a/tests/data/torture.py
+++ b/tests/data/torture.py
@@ -22,6 +22,12 @@ def test(self, othr):
(name, description, self.default, self.selected, self.auto_generated, self.parameters, self.meta_data, self.schedule) ==
(name, description, othr.default, othr.selected, othr.auto_generated, othr.parameters, othr.meta_data, othr.schedule))
+
+assert (
+ a_function(very_long_arguments_that_surpass_the_limit, which_is_eighty_eight_in_this_case_plus_a_bit_more)
+ == {"x": "this need to pass the line limit as well", "b": "but only by a little bit"}
+)
+
# output
importA
@@ -76,3 +82,10 @@ def test(self, othr):
othr.meta_data,
othr.schedule,
)
+
+
+assert a_function(
+ very_long_arguments_that_surpass_the_limit,
+ which_is_eighty_eight_in_this_case_plus_a_bit_more,
+) == {"x": "this need to pass the line limit as well", "b": "but only by a little bit"}
+
| Resolves #2563 | https://api.github.com/repos/psf/black/pulls/2822 | 2022-01-29T02:43:51Z | 2022-01-29T03:38:51Z | 2022-01-29T03:38:51Z | 2022-01-29T03:43:44Z | 296 | psf/black | 24,069 |
Pole balancing documentation of observation matches code | diff --git a/gym/envs/classic_control/cartpole.py b/gym/envs/classic_control/cartpole.py
index c4d16e0fe74..eb84d4c84b2 100644
--- a/gym/envs/classic_control/cartpole.py
+++ b/gym/envs/classic_control/cartpole.py
@@ -25,11 +25,11 @@ class CartPoleEnv(gym.Env):
Observation:
Type: Box(4)
- Num Observation Min Max
- 0 Cart Position -4.8 4.8
- 1 Cart Velocity -Inf Inf
- 2 Pole Angle -24 deg 24 deg
- 3 Pole Velocity At Tip -Inf Inf
+ Num Observation Min Max
+ 0 Cart Position -4.8 4.8
+ 1 Cart Velocity -Inf Inf
+ 2 Pole Angle -0.418 rad (-24 deg) 0.418 rad (24 deg)
+ 3 Pole Angular Velocity -Inf Inf
Actions:
Type: Discrete(2)
| The returned angle is returned in the unit of radians and not degrees.
observation number 3 is the angular velocity | https://api.github.com/repos/openai/gym/pulls/1914 | 2020-05-13T15:50:17Z | 2020-06-19T22:18:20Z | 2020-06-19T22:18:20Z | 2020-06-19T22:18:20Z | 270 | openai/gym | 5,713 |
bpo-10496: distutils check_environ() handles getpwuid() error | diff --git a/Lib/distutils/tests/test_util.py b/Lib/distutils/tests/test_util.py
index e2fc3809587faa..bf0d4333f9aeaa 100644
--- a/Lib/distutils/tests/test_util.py
+++ b/Lib/distutils/tests/test_util.py
@@ -4,6 +4,7 @@
import unittest
from copy import copy
from test.support import run_unittest
+from unittest import mock
from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError
from distutils.util import (get_platform, convert_path, change_root,
@@ -234,20 +235,35 @@ def _join(*path):
def test_check_environ(self):
util._environ_checked = 0
- if 'HOME' in os.environ:
- del os.environ['HOME']
+ os.environ.pop('HOME', None)
- # posix without HOME
- if os.name == 'posix': # this test won't run on windows
- check_environ()
- import pwd
- self.assertEqual(os.environ['HOME'], pwd.getpwuid(os.getuid())[5])
- else:
- check_environ()
+ check_environ()
self.assertEqual(os.environ['PLAT'], get_platform())
self.assertEqual(util._environ_checked, 1)
+ @unittest.skipUnless(os.name == 'posix', 'specific to posix')
+ def test_check_environ_getpwuid(self):
+ util._environ_checked = 0
+ os.environ.pop('HOME', None)
+
+ import pwd
+
+ # only set pw_dir field, other fields are not used
+ result = pwd.struct_passwd((None, None, None, None, None,
+ '/home/distutils', None))
+ with mock.patch.object(pwd, 'getpwuid', return_value=result):
+ check_environ()
+ self.assertEqual(os.environ['HOME'], '/home/distutils')
+
+ util._environ_checked = 0
+ os.environ.pop('HOME', None)
+
+ # bpo-10496: Catch pwd.getpwuid() error
+ with mock.patch.object(pwd, 'getpwuid', side_effect=KeyError):
+ check_environ()
+ self.assertNotIn('HOME', os.environ)
+
def test_split_quoted(self):
self.assertEqual(split_quoted('""one"" "two" \'three\' \\four'),
['one', 'two', 'three', 'four'])
diff --git a/Lib/distutils/util.py b/Lib/distutils/util.py
index 83682628ba680c..30a21e4afa1f74 100644
--- a/Lib/distutils/util.py
+++ b/Lib/distutils/util.py
@@ -157,8 +157,13 @@ def check_environ ():
return
if os.name == 'posix' and 'HOME' not in os.environ:
- import pwd
- os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
+ try:
+ import pwd
+ os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
+ except (ImportError, KeyError):
+ # bpo-10496: if the current user identifier doesn't exist in the
+ # password database, do nothing
+ pass
if 'PLAT' not in os.environ:
os.environ['PLAT'] = get_platform()
diff --git a/Misc/NEWS.d/next/Library/2018-12-05-17-42-49.bpo-10496.laV_IE.rst b/Misc/NEWS.d/next/Library/2018-12-05-17-42-49.bpo-10496.laV_IE.rst
new file mode 100644
index 00000000000000..cbfe5eb11668b1
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2018-12-05-17-42-49.bpo-10496.laV_IE.rst
@@ -0,0 +1,3 @@
+:func:`~distutils.utils.check_environ` of :mod:`distutils.utils` now catchs
+:exc:`KeyError` on calling :func:`pwd.getpwuid`: don't create the ``HOME``
+environment variable in this case.
| check_environ() of distutils.utils now catchs KeyError on calling
pwd.getpwuid(): don't create the HOME environment variable in this
case.
<!-- issue-number: [bpo-10496](https://bugs.python.org/issue10496) -->
https://bugs.python.org/issue10496
<!-- /issue-number -->
| https://api.github.com/repos/python/cpython/pulls/10931 | 2018-12-05T16:44:38Z | 2018-12-18T15:17:57Z | 2018-12-18T15:17:57Z | 2018-12-18T15:18:27Z | 950 | python/cpython | 4,140 |
Update main.py | diff --git a/Snake_water_gun/main.py b/Snake_water_gun/main.py
index 5b13f54911..5a8b133289 100644
--- a/Snake_water_gun/main.py
+++ b/Snake_water_gun/main.py
@@ -1,6 +1,6 @@
-# This is an editied version
+# This is an edited version
# Made the code much more easier to read
-# Used better naming for variable
+# Used better naming for variables
# There were few inconsistencies in the outputs of the first if/else/if ladder \
# inside the while loop. That is solved.
import random
| Fixed typos | https://api.github.com/repos/geekcomputers/Python/pulls/1682 | 2022-08-30T05:47:48Z | 2022-10-10T20:39:10Z | 2022-10-10T20:39:10Z | 2022-10-10T20:39:10Z | 145 | geekcomputers/Python | 31,125 |
fix typo | diff --git a/modeling.py b/modeling.py
index 8b5da0003..ea575220a 100644
--- a/modeling.py
+++ b/modeling.py
@@ -740,12 +740,12 @@ def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
- # `context_layer` = [B*F, N*V]
+ # `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
- # `context_layer` = [B, F, N*V]
+ # `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
| https://api.github.com/repos/google-research/bert/pulls/252 | 2018-12-11T09:28:41Z | 2018-12-18T18:29:12Z | 2018-12-18T18:29:12Z | 2018-12-18T18:29:12Z | 224 | google-research/bert | 38,410 |
|
Fix several typos | diff --git a/examples/lstm_seq2seq.py b/examples/lstm_seq2seq.py
index a53a3dcde66..2754561df48 100644
--- a/examples/lstm_seq2seq.py
+++ b/examples/lstm_seq2seq.py
@@ -10,7 +10,7 @@
# Summary of the algorithm
- We start with input sequences from a domain (e.g. English sentences)
- and correspding target sequences from another domain
+ and corresponding target sequences from another domain
(e.g. French sentences).
- An encoder LSTM turns input sequences to 2 state vectors
(we keep the last LSTM state and discard the outputs).
diff --git a/keras/backend/tensorflow_backend.py b/keras/backend/tensorflow_backend.py
index 346d0b88662..9716eb0bc4f 100644
--- a/keras/backend/tensorflow_backend.py
+++ b/keras/backend/tensorflow_backend.py
@@ -2736,7 +2736,7 @@ def rnn(step_function, inputs, initial_states,
states: List of tensors.
Returns:
outputs: Tensor with shape (samples, ...) (no time dimension),
- new_states: Tist of tensors, same length and shapes
+ new_states: List of tensors, same length and shapes
as 'states'.
inputs: Tensor of temporal data of shape (samples, time, ...)
(at least 3D).
diff --git a/keras/backend/theano_backend.py b/keras/backend/theano_backend.py
index f91555481b1..3f70d826c88 100644
--- a/keras/backend/theano_backend.py
+++ b/keras/backend/theano_backend.py
@@ -1323,7 +1323,7 @@ def rnn(step_function, inputs, initial_states,
states: List of tensors.
Returns:
outputs: Tensor with shape (samples, ...) (no time dimension),
- new_states: Tist of tensors, same length and shapes
+ new_states: List of tensors, same length and shapes
as 'states'.
inputs: Tensor of temporal data of shape (samples, time, ...)
(at least 3D).
diff --git a/keras/engine/network.py b/keras/engine/network.py
index 5304e706822..46c69795e7a 100644
--- a/keras/engine/network.py
+++ b/keras/engine/network.py
@@ -131,7 +131,7 @@ def _base_init(self, name=None):
# Entries are unique. Includes input and output layers.
self._layers = []
- # Used only in conjonction with graph-networks
+ # Used only in conjunction with graph-networks
self._outbound_nodes = []
self._inbound_nodes = []
@@ -524,7 +524,7 @@ def input_spec(self):
or a single instance if the model has only one input.
"""
if not self._is_graph_network:
- # TODO: support it in subclassd networks after inputs are set.
+ # TODO: support it in subclassed networks after inputs are set.
return None
specs = []
diff --git a/keras/engine/saving.py b/keras/engine/saving.py
index c8fcc1046f1..5580d5df703 100644
--- a/keras/engine/saving.py
+++ b/keras/engine/saving.py
@@ -833,7 +833,7 @@ def _need_convert_kernel(original_backend):
The convolution operation is implemented differently in different backends.
While TH implements convolution, TF and CNTK implement the correlation operation.
So the channel axis needs to be flipped when we're loading TF weights onto a TH model,
- or vice verca. However, there's no conversion required between TF and CNTK.
+ or vice versa. However, there's no conversion required between TF and CNTK.
# Arguments
original_backend: Keras backend the weights were trained with, as a string.
| Just fixing several typos, the first three of which are visible in the keras.io documentation. | https://api.github.com/repos/keras-team/keras/pulls/10468 | 2018-06-19T01:13:25Z | 2018-06-19T01:43:25Z | 2018-06-19T01:43:25Z | 2018-06-19T01:43:25Z | 889 | keras-team/keras | 47,360 |
fix upsample flops compute by skipping unused kargs | diff --git a/deepspeed/profiling/flops_profiler/profiler.py b/deepspeed/profiling/flops_profiler/profiler.py
index a1a6a6fac9f1..9a4889005782 100644
--- a/deepspeed/profiling/flops_profiler/profiler.py
+++ b/deepspeed/profiling/flops_profiler/profiler.py
@@ -511,7 +511,7 @@ def _silu_flops_compute(input: Tensor, inplace: bool = False):
return input.numel(), 0
-def _gelu_flops_compute(input):
+def _gelu_flops_compute(input, **kwargs):
return input.numel(), 0
@@ -668,16 +668,14 @@ def _instance_norm_flops_compute(
return input.numel() * (5 if has_affine else 4), 0
-def _upsample_flops_compute(input,
- size=None,
- scale_factor=None,
- mode="nearest",
- align_corners=None):
+def _upsample_flops_compute(input, **kwargs):
+ size = kwargs.get('size', None)
if size is not None:
- if isinstance(size, tuple):
+ if isinstance(size, tuple) or isinstance(size, list):
return int(_prod(size)), 0
else:
return int(size), 0
+ scale_factor = kwargs.get('scale_factor', None)
assert scale_factor is not None, "either size or scale_factor should be defined"
flops = input.numel()
if isinstance(scale_factor, tuple) and len(scale_factor) == len(input):
| This PR fixes the flops computation for torch.nn.functional.upsample (which takes more key args in the latest API) by allowing arbitrary number of key args.
| https://api.github.com/repos/microsoft/DeepSpeed/pulls/2773 | 2023-01-31T20:26:50Z | 2023-02-01T20:44:28Z | 2023-02-01T20:44:28Z | 2023-02-01T20:44:29Z | 357 | microsoft/DeepSpeed | 10,686 |
Add --disable-hook-validation | diff --git a/certbot/cli.py b/certbot/cli.py
index bcb7785c59e..f3decec0598 100644
--- a/certbot/cli.py
+++ b/certbot/cli.py
@@ -356,7 +356,8 @@ def parse_args(self):
" {0} conflicts with dialog_mode").format(arg)
)
- hooks.validate_hooks(parsed_args)
+ if parsed_args.validate_hooks:
+ hooks.validate_hooks(parsed_args)
return parsed_args
@@ -792,6 +793,14 @@ def prepare_and_parse_args(plugins, args, detect_defaults=False):
"For this command, the shell variable $RENEWED_LINEAGE will point to the"
"config live subdirectory containing the new certs and keys; the shell variable "
"$RENEWED_DOMAINS will contain a space-delimited list of renewed cert domains")
+ helpful.add(
+ "renew", "--disable-hook-validation",
+ action='store_false', dest='validate_hooks', default=True,
+ help="Ordinarily the commands specified for --pre-hook/--post-hook/--renew-hook"
+ " will be checked for validity, to see if the programs being run are in the $PATH,"
+ " so that mistakes can be caught early, even when the hooks aren't being run just yet."
+ " The validation is rather simplistic and fails if you use more advanced"
+ " shell constructs, so you can use this switch to disable it.")
helpful.add_deprecated_argument("--agree-dev-preview", 0)
diff --git a/certbot/tests/cli_test.py b/certbot/tests/cli_test.py
index 671da16f05f..adbde1d3e33 100644
--- a/certbot/tests/cli_test.py
+++ b/certbot/tests/cli_test.py
@@ -651,6 +651,18 @@ def test_quiet_renew(self):
out = stdout.getvalue()
self.assertEqual("", out)
+ def test_renew_hook_validation(self):
+ self._make_test_renewal_conf('sample-renewal.conf')
+ args = ["renew", "--dry-run", "--post-hook=no-such-command"]
+ self._test_renewal_common(True, [], args=args, should_renew=False,
+ error_expected=True)
+
+ def test_renew_no_hook_validation(self):
+ self._make_test_renewal_conf('sample-renewal.conf')
+ args = ["renew", "--dry-run", "--post-hook=no-such-command",
+ "--disable-hook-validation"]
+ self._test_renewal_common(True, [], args=args, should_renew=True,
+ error_expected=False)
@mock.patch("certbot.cli.set_by_cli")
def test_ancient_webroot_renewal_conf(self, mock_set_by_cli):
| As discussed in #3020.
| https://api.github.com/repos/certbot/certbot/pulls/3037 | 2016-05-20T05:51:02Z | 2016-06-08T19:35:14Z | 2016-06-08T19:35:14Z | 2016-06-15T01:23:59Z | 616 | certbot/certbot | 2,480 |
loggerd: remove duplicate calls to visionstream_destroy | diff --git a/selfdrive/loggerd/loggerd.cc b/selfdrive/loggerd/loggerd.cc
index cb2e5d47535b7b..d575db9900a199 100644
--- a/selfdrive/loggerd/loggerd.cc
+++ b/selfdrive/loggerd/loggerd.cc
@@ -162,7 +162,6 @@ void encoder_thread(bool is_streaming, bool raw_clips, bool front) {
VIPCBuf* buf = visionstream_get(&stream, &extra);
if (buf == NULL) {
LOG("visionstream get failed");
- visionstream_destroy(&stream);
break;
}
| https://api.github.com/repos/commaai/openpilot/pulls/1843 | 2020-07-08T13:15:18Z | 2020-07-09T04:47:23Z | 2020-07-09T04:47:23Z | 2020-07-09T07:38:19Z | 137 | commaai/openpilot | 9,531 |
|
Typo fix | diff --git a/Methodology and Resources/Cobalt Strike - Cheatsheet.md b/Methodology and Resources/Cobalt Strike - Cheatsheet.md
index 8e25e592e2..affccdbefd 100644
--- a/Methodology and Resources/Cobalt Strike - Cheatsheet.md
+++ b/Methodology and Resources/Cobalt Strike - Cheatsheet.md
@@ -280,7 +280,7 @@ beacon > execute-assembly /home/audit/Rubeus.exe
:warning: OPSEC Advice: Use the **spawnto** command to change the process Beacon will launch for its post-exploitation jobs. The default is rundll32.exe
-- **portscan:** Performs a portscan on a spesific target.
+- **portscan:** Performs a portscan on a specific target.
- **runas:** A wrapper of runas.exe, using credentials you can run a command as another user.
- **pth:** By providing a username and a NTLM hash you can perform a Pass The Hash attack and inject a TGT on the current process. \
:exclamation: This module needs Administrator privileges.
| https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/522 | 2022-08-08T12:09:16Z | 2022-08-08T20:08:20Z | 2022-08-08T20:08:20Z | 2022-08-08T20:08:20Z | 246 | swisskyrepo/PayloadsAllTheThings | 8,456 |
|
DOC Update default values in kernel_approximation doc string | diff --git a/sklearn/kernel_approximation.py b/sklearn/kernel_approximation.py
index eda042bfed34e..d13c172a5d644 100644
--- a/sklearn/kernel_approximation.py
+++ b/sklearn/kernel_approximation.py
@@ -32,10 +32,10 @@ class RBFSampler(TransformerMixin, BaseEstimator):
Parameters
----------
- gamma : float
+ gamma : float, default=1.0
Parameter of RBF kernel: exp(-gamma * x^2)
- n_components : int
+ n_components : int, default=100
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
@@ -146,10 +146,10 @@ class SkewedChi2Sampler(TransformerMixin, BaseEstimator):
Parameters
----------
- skewedness : float
+ skewedness : float, default=1.0
"skewedness" parameter of the kernel. Needs to be cross-validated.
- n_components : int
+ n_components : int, default=100
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
@@ -455,7 +455,7 @@ class Nystroem(TransformerMixin, BaseEstimator):
Parameters
----------
- kernel : string or callable, default="rbf"
+ kernel : string or callable, default='rbf'
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
@@ -477,7 +477,7 @@ class Nystroem(TransformerMixin, BaseEstimator):
Additional parameters (keyword arguments) for kernel function passed
as callable object.
- n_components : int
+ n_components : int, default=100
Number of features to construct.
How many data points will be used to construct the mapping.
| Updated default values of parameter in kernel_approximation doc string
#### Reference Issues/PRs
Reference: #15761
Addresses sklearn.kernel_approximation
#DataUmbrella
| https://api.github.com/repos/scikit-learn/scikit-learn/pulls/17536 | 2020-06-08T21:22:38Z | 2020-06-08T21:59:57Z | 2020-06-08T21:59:57Z | 2020-06-08T21:59:57Z | 446 | scikit-learn/scikit-learn | 46,150 |
implement admin message detail page | diff --git a/website/src/components/Messages/MessageTableEntry.tsx b/website/src/components/Messages/MessageTableEntry.tsx
index d968678689..b8ff9ef98a 100644
--- a/website/src/components/Messages/MessageTableEntry.tsx
+++ b/website/src/components/Messages/MessageTableEntry.tsx
@@ -1,5 +1,6 @@
import {
Avatar,
+ AvatarProps,
Box,
HStack,
Menu,
@@ -15,7 +16,18 @@ import {
useToast,
} from "@chakra-ui/react";
import { boolean } from "boolean";
-import { ClipboardList, Copy, Flag, Link, MessageSquare, MoreHorizontal, Slash, Trash, User } from "lucide-react";
+import {
+ ClipboardList,
+ Copy,
+ Flag,
+ Link,
+ MessageSquare,
+ MoreHorizontal,
+ Shield,
+ Slash,
+ Trash,
+ User,
+} from "lucide-react";
import { useRouter } from "next/router";
import { useTranslation } from "next-i18next";
import { useCallback, useEffect, useMemo, useState } from "react";
@@ -24,6 +36,7 @@ import { MessageEmojiButton } from "src/components/Messages/MessageEmojiButton";
import { ReportPopup } from "src/components/Messages/ReportPopup";
import { useHasAnyRole } from "src/hooks/auth/useHasAnyRole";
import { del, post, put } from "src/lib/api";
+import { ROUTES } from "src/lib/routes";
import { colors } from "src/styles/Theme/colors";
import { Message, MessageEmojis } from "src/types/Conversation";
import { emojiIcons, isKnownEmoji } from "src/types/Emoji";
@@ -34,9 +47,17 @@ interface MessageTableEntryProps {
message: Message;
enabled?: boolean;
highlight?: boolean;
+ avartarPosition?: "middle" | "top";
+ avartarProps?: AvatarProps;
}
-export function MessageTableEntry({ message, enabled, highlight }: MessageTableEntryProps) {
+export function MessageTableEntry({
+ message,
+ enabled,
+ highlight,
+ avartarPosition = "middle",
+ avartarProps,
+}: MessageTableEntryProps) {
const router = useRouter();
const [emojiState, setEmojis] = useState<MessageEmojis>({ emojis: {}, user_emojis: [] });
useEffect(() => {
@@ -68,9 +89,10 @@ export function MessageTableEntry({ message, enabled, highlight }: MessageTableE
mr={inlineAvatar ? 2 : 0}
name={`${boolean(message.is_assistant) ? "Assistant" : "User"}`}
src={`${boolean(message.is_assistant) ? "/images/logos/logo.png" : "/images/temp-avatars/av1.jpg"}`}
+ {...avartarProps}
/>
),
- [borderColor, inlineAvatar, message.is_assistant]
+ [avartarProps, borderColor, inlineAvatar, message.is_assistant]
);
const highlightColor = useColorModeValue(colors.light.active, colors.dark.active);
@@ -86,13 +108,17 @@ export function MessageTableEntry({ message, enabled, highlight }: MessageTableE
};
return (
- <HStack w={["full", "full", "full", "fit-content"]} gap={2}>
+ <HStack
+ w={["full", "full", "full", "fit-content"]}
+ gap={0.5}
+ alignItems={avartarPosition === "top" ? "start" : "center"}
+ >
{!inlineAvatar && avatar}
<Box
width={["full", "full", "full", "fit-content"]}
maxWidth={["full", "full", "full", "2xl"]}
p="4"
- borderRadius="md"
+ borderRadius="18px"
bg={message.is_assistant ? backgroundColor : backgroundColor2}
outline={highlight && "2px solid black"}
outlineColor={highlightColor}
@@ -249,6 +275,9 @@ const MessageActions = ({
<MenuItem onClick={() => handleCopy(id)} icon={<Copy />}>
{t("copy_message_id")}
</MenuItem>
+ <MenuItem as="a" href={ROUTES.ADMIN_MESSAGE_DETAIL(message.id)} target="_blank" icon={<Shield />}>
+ View in admin area
+ </MenuItem>
<MenuItem as="a" href={`/admin/manage_user/${message.user_id}`} target="_blank" icon={<User />}>
{t("view_user")}
</MenuItem>
diff --git a/website/src/components/Messages/MessageTree.tsx b/website/src/components/Messages/MessageTree.tsx
new file mode 100644
index 0000000000..639e133067
--- /dev/null
+++ b/website/src/components/Messages/MessageTree.tsx
@@ -0,0 +1,104 @@
+import { Box } from "@chakra-ui/react";
+import { Fragment } from "react";
+import { MessageWithChildren } from "src/types/Conversation";
+
+import { MessageTableEntry } from "./MessageTableEntry";
+
+const connectionColor = "gray.300";
+const messagePaddingTop = 16;
+const avatarSize = 32;
+const avartarMarginTop = 6;
+const maxDepth = 100; // this only used for debug UI in mobile
+const left = avatarSize / 2 - 1;
+
+export const MessageTree = ({ tree, messageId }: { tree: MessageWithChildren; messageId?: string }) => {
+ const renderChildren = (children: MessageWithChildren[], depth = 1) => {
+ const hasSibling = children.length > 1;
+ return children.map((child, idx) => {
+ const hasChildren = child.children.length > 0;
+ const isLastChild = idx === children.length - 1;
+ return (
+ <Fragment key={child.id}>
+ <Box position="relative" className="box2">
+ <ConnectionCurve></ConnectionCurve>
+ <Box paddingLeft={`32px`} position="relative" className="box3">
+ {hasSibling && !isLastChild && (
+ <Box
+ height={`calc(100% - 26px)`}
+ position="absolute"
+ width="2px"
+ bg="gray.300"
+ left={`${left}px`}
+ top="26px"
+ ></Box>
+ )}
+ <Box pt={`${messagePaddingTop}px`} position="relative" className="box4">
+ {hasChildren && depth < maxDepth && <Connection className="connection1"></Connection>}
+ <MessageTableEntry
+ avartarProps={{
+ mt: `${avartarMarginTop}px`,
+ }}
+ avartarPosition="top"
+ highlight={child.id === messageId}
+ message={child}
+ ></MessageTableEntry>
+ </Box>
+ {depth < maxDepth && renderChildren(child.children, depth + 1)}
+ </Box>
+ </Box>
+ </Fragment>
+ );
+ });
+ };
+
+ return (
+ <>
+ <Box position="relative">
+ <Box height="full" position="absolute" width="2px" bg={connectionColor} left={`${left}px`}></Box>
+ <MessageTableEntry
+ message={tree}
+ avartarPosition="top"
+ highlight={tree.id === messageId}
+ avartarProps={{
+ size: "sm",
+ }}
+ ></MessageTableEntry>
+ </Box>
+ {renderChildren(tree.children)}
+ </>
+ );
+};
+
+const Connection = ({ className, isSibling = false }: { isSibling?: boolean; className?: string }) => {
+ const top = isSibling ? `26px` : `32px`;
+ return (
+ <Box
+ height={`calc(100% - ${top})`}
+ position="absolute"
+ width="2px"
+ bg="gray.300"
+ left={`${left}px`}
+ top={top}
+ className={className}
+ ></Box>
+ );
+};
+
+const height = avatarSize / 2 + avartarMarginTop + messagePaddingTop;
+const width = avatarSize / 2 + 10;
+const ConnectionCurve = () => {
+ return (
+ <Box
+ position="absolute"
+ height={`${height}px`}
+ width={`${width}px`}
+ left={`${left}px `}
+ borderBottomWidth="2px"
+ borderBottomLeftRadius="10px"
+ borderLeftStyle="solid"
+ borderLeftWidth="2px"
+ borderColor={connectionColor}
+ className="curve"
+ ></Box>
+ );
+};
diff --git a/website/src/lib/oasst_api_client.ts b/website/src/lib/oasst_api_client.ts
index a36073232c..aec9b2e8df 100644
--- a/website/src/lib/oasst_api_client.ts
+++ b/website/src/lib/oasst_api_client.ts
@@ -189,6 +189,13 @@ export class OasstApiClient {
return this.get<Message>(`/api/v1/messages/${message_id}?username=${user.id}&auth_method=${user.auth_method}`);
}
+ async fetch_message_tree(message_id: string) {
+ return this.get<{
+ id: string;
+ messages: Message[];
+ }>(`/api/v1/messages/${message_id}/tree`);
+ }
+
/**
* Delete a message by its id
*/
diff --git a/website/src/pages/admin/messages/[id].tsx b/website/src/pages/admin/messages/[id].tsx
new file mode 100644
index 0000000000..8c725d07a9
--- /dev/null
+++ b/website/src/pages/admin/messages/[id].tsx
@@ -0,0 +1,68 @@
+import { Card, CardBody, CardHeader, CircularProgress, Grid } from "@chakra-ui/react";
+import { GetServerSideProps } from "next";
+import Head from "next/head";
+import { useRouter } from "next/router";
+import { serverSideTranslations } from "next-i18next/serverSideTranslations";
+import { AdminArea } from "src/components/AdminArea";
+import { JsonCard } from "src/components/JsonCard";
+import { getAdminLayout } from "src/components/Layout";
+import { MessageTree } from "src/components/Messages/MessageTree";
+import { get } from "src/lib/api";
+import { Message, MessageWithChildren } from "src/types/Conversation";
+import useSWRImmutable from "swr/immutable";
+
+const MessageDetail = () => {
+ const router = useRouter();
+ const messageId = router.query.id;
+ const { data, isLoading, error } = useSWRImmutable<{
+ tree: MessageWithChildren | null;
+ message?: Message;
+ }>(`/api/admin/messages/${messageId}/tree`, get);
+
+ return (
+ <>
+ <Head>
+ <title>Open Assistant</title>
+ </Head>
+ <AdminArea>
+ {isLoading && <CircularProgress isIndeterminate></CircularProgress>}
+ {error && "Unable to load message tree"}
+ {data &&
+ (data.tree === null ? (
+ "Unable to build tree"
+ ) : (
+ <Grid gap="6">
+ <Card>
+ <CardHeader fontWeight="bold" fontSize="xl" pb="0">
+ Message Detail
+ </CardHeader>
+ <CardBody>
+ <JsonCard>{data.message}</JsonCard>
+ </CardBody>
+ </Card>
+ <Card>
+ <CardHeader fontWeight="bold" fontSize="xl" pb="0">
+ Tree {data.tree.id}
+ </CardHeader>
+ <CardBody>
+ <MessageTree tree={data.tree} messageId={data.message?.id}></MessageTree>
+ </CardBody>
+ </Card>
+ </Grid>
+ ))}
+ </AdminArea>
+ </>
+ );
+};
+
+MessageDetail.getLayout = getAdminLayout;
+
+export default MessageDetail;
+
+export const getServerSideProps: GetServerSideProps = async ({ locale = "en" }) => {
+ return {
+ props: {
+ ...(await serverSideTranslations(locale, ["common", "labelling", "message"])),
+ },
+ };
+};
diff --git a/website/src/pages/api/admin/messages/[id]/tree.ts b/website/src/pages/api/admin/messages/[id]/tree.ts
new file mode 100644
index 0000000000..0688dab8e1
--- /dev/null
+++ b/website/src/pages/api/admin/messages/[id]/tree.ts
@@ -0,0 +1,52 @@
+import { withAnyRole } from "src/lib/auth";
+import { createApiClient } from "src/lib/oasst_client_factory";
+import { Message, MessageWithChildren } from "src/types/Conversation";
+
+export default withAnyRole(["admin", "moderator"], async (req, res, token) => {
+ const client = await createApiClient(token);
+ const messageId = req.query.id as string;
+ const response = await client.fetch_message_tree(messageId);
+
+ if (!response) {
+ return res.json({ tree: null });
+ }
+
+ const tree = buildTree(response.messages);
+
+ return res.json({ tree, message: response.messages.find((m) => m.id === messageId) });
+});
+
+// https://medium.com/@lizhuohang.selina/building-a-hierarchical-tree-from-a-flat-list-an-easy-to-understand-solution-visualisation-19cb24bdfa33
+const buildTree = (messages: Message[]): MessageWithChildren | null => {
+ const map: Record<string, MessageWithChildren> = {};
+ const tree = [];
+
+ // Build a hash table and map items to objects
+ messages.forEach(function (item) {
+ const id = item.id;
+ if (!map[id]) {
+ map[id] = { ...item, children: [] };
+ }
+ });
+
+ // Loop over hash table
+ let mappedElem: MessageWithChildren;
+ for (const id in map) {
+ if (map[id]) {
+ mappedElem = map[id];
+
+ // If the element is not at the root level, add it to its parent array of children. Note this will continue till we have only root level elements left
+ if (mappedElem.parent_id) {
+ const parentId = mappedElem.parent_id;
+ map[parentId].children.push(mappedElem);
+ }
+
+ // If the element is at the root level, directly push to the tree
+ else {
+ tree.push(mappedElem);
+ }
+ }
+ }
+
+ return tree.shift() || null;
+};
diff --git a/website/src/types/Conversation.ts b/website/src/types/Conversation.ts
index 57a9efbb4d..f5841f2722 100644
--- a/website/src/types/Conversation.ts
+++ b/website/src/types/Conversation.ts
@@ -16,7 +16,7 @@ export interface Message extends MessageEmojis {
is_assistant: boolean;
lang: string;
created_date: string; // iso date string
- parent_id: string;
+ parent_id: string | null;
frontend_message_id?: string;
user_id: string;
user_is_author: boolean | null;
@@ -40,3 +40,7 @@ export type FetchUserMessagesCursorResponse = {
items: Message[];
order: "asc" | "desc";
};
+
+export type MessageWithChildren = Message & {
+ children: MessageWithChildren[];
+};
| - Add admin message detail page.
- Add new message tree UI and use/tree endpoint to receive data.
- New message tree only being used in the admin panel. Not in the user area yet.
- The current tree UI display is not really well in mobile when the depth > 4. I will address this in the next PR.
Message tree UI
![image](https://user-images.githubusercontent.com/33456881/218231870-bcb02f5c-7557-4a35-b0bb-d4352e02fd53.png)
| https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/1453 | 2023-02-11T01:39:45Z | 2023-02-11T03:27:02Z | 2023-02-11T03:27:02Z | 2023-02-11T03:27:03Z | 3,518 | LAION-AI/Open-Assistant | 37,088 |
Added GitHub Actions to create and check for reminders in pull requests. | diff --git a/.github/workflows/reminders_check.yml b/.github/workflows/reminders_check.yml
new file mode 100644
index 0000000000000..eaaa909363202
--- /dev/null
+++ b/.github/workflows/reminders_check.yml
@@ -0,0 +1,17 @@
+name: Check reminders
+
+on:
+ schedule:
+ - cron: '0 * * * *' # At the start of every hour
+ workflow_dispatch:
+
+permissions:
+ contents: read
+ pull-requests: write
+
+jobs:
+ reminders:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check reminders and notify users
+ uses: agrc/reminder-action@v1
diff --git a/.github/workflows/reminders_create.yml b/.github/workflows/reminders_create.yml
new file mode 100644
index 0000000000000..f92320bcc2e75
--- /dev/null
+++ b/.github/workflows/reminders_create.yml
@@ -0,0 +1,17 @@
+name: Create reminders
+
+on:
+ issue_comment:
+ types: [created, edited]
+ workflow_dispatch:
+
+permissions:
+ contents: read
+ pull-requests: write
+
+jobs:
+ reminders:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check comments and create reminders
+ uses: agrc/create-reminder-action@v1
| This branch provides functionality to create reminders on pull requests. The command to do so is:
```
/remind me <action> [in] <time>
```
Current granularity of the reminders is by day, so valid uses would be:
```
/remind me to re-review this PR in 2 days
/remind me to check for updates tomorrow
/remind me to evaluate reminders in 2 months
```
Example of adding a reminder and the following reminder posted by the action is:
![image](https://github.com/django/django/assets/124304/0bcc172f-22b5-4828-a7ba-cade2b9eac89)
![image](https://github.com/django/django/assets/124304/ef48ead4-a84d-4b8b-a644-571dfcf6ef06)
| https://api.github.com/repos/django/django/pulls/17852 | 2024-02-13T20:53:49Z | 2024-02-22T12:17:46Z | 2024-02-22T12:17:46Z | 2024-04-12T20:27:27Z | 337 | django/django | 50,850 |
community: Implement lazy_load() for GitbookLoader | diff --git a/libs/community/langchain_community/document_loaders/gitbook.py b/libs/community/langchain_community/document_loaders/gitbook.py
index d2a32b367a062d..8fdf73708967b3 100644
--- a/libs/community/langchain_community/document_loaders/gitbook.py
+++ b/libs/community/langchain_community/document_loaders/gitbook.py
@@ -1,4 +1,4 @@
-from typing import Any, List, Optional
+from typing import Any, Iterator, List, Optional
from urllib.parse import urljoin, urlparse
from langchain_core.documents import Document
@@ -47,23 +47,23 @@ def __init__(
self.load_all_paths = load_all_paths
self.content_selector = content_selector
- def load(self) -> List[Document]:
+ def lazy_load(self) -> Iterator[Document]:
"""Fetch text from one single GitBook page."""
if self.load_all_paths:
soup_info = self.scrape()
relative_paths = self._get_paths(soup_info)
urls = [urljoin(self.base_url, path) for path in relative_paths]
soup_infos = self.scrape_all(urls)
- _documents = [
- self._get_document(soup_info, url)
- for soup_info, url in zip(soup_infos, urls)
- ]
+ for soup_info, url in zip(soup_infos, urls):
+ doc = self._get_document(soup_info, url)
+ if doc:
+ yield doc
+
else:
soup_info = self.scrape()
- _documents = [self._get_document(soup_info, self.web_path)]
- documents = [d for d in _documents if d]
-
- return documents
+ doc = self._get_document(soup_info, self.web_path)
+ if doc:
+ yield doc
def _get_document(
self, soup: Any, custom_url: Optional[str] = None
| Integration test: `tests/integration_tests/document_loaders/test_gitbook.py` | https://api.github.com/repos/langchain-ai/langchain/pulls/18670 | 2024-03-06T13:55:29Z | 2024-03-06T14:14:36Z | 2024-03-06T14:14:36Z | 2024-03-06T14:15:07Z | 421 | langchain-ai/langchain | 43,331 |
Better error message for DataFrame.hist() without numerical columns (… | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 90297ecfa3415..fed4b0d90983c 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -2426,6 +2426,10 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
data = data._get_numeric_data()
naxes = len(data.columns)
+ if naxes == 0:
+ raise ValueError("hist method requires numerical columns, "
+ "nothing to plot.")
+
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index c62ed21c2fb17..f3f6c9c7fc2d4 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -209,6 +209,16 @@ def test_hist_df_legacy(self):
with pytest.raises(AttributeError):
ser.hist(foo='bar')
+ @pytest.mark.slow
+ def test_hist_non_numerical_raises(self):
+ # gh-10444
+ df = DataFrame(np.random.rand(10, 2))
+ df_o = df.astype(np.object)
+
+ msg = "hist method requires numerical columns, nothing to plot."
+ with pytest.raises(ValueError, match=msg):
+ df_o.hist()
+
@pytest.mark.slow
def test_hist_layout(self):
df = DataFrame(randn(100, 3))
| Closes #10444
Added simple check for non-zero number of numeric columns plus suggested error message in case the check fails.
Happy to make any adjustments this if desired.
| https://api.github.com/repos/pandas-dev/pandas/pulls/26483 | 2019-05-21T19:16:14Z | 2019-05-24T15:47:02Z | 2019-05-24T15:47:02Z | 2019-05-25T12:57:38Z | 405 | pandas-dev/pandas | 44,906 |
docker: Remove misleading CLI_ARGS | diff --git a/docker/.env.example b/docker/.env.example
index 6e2ad6ac3d..2de9f0ab6f 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -2,8 +2,6 @@
# however for me to work i had to specify the exact version for my card ( 2060 ) it was 7.5
# https://developer.nvidia.com/cuda-gpus you can find the version for your card here
TORCH_CUDA_ARCH_LIST=7.5
-# your command-line flags go here:
-CLI_ARGS=--listen
# the port the webui binds to on the host
HOST_PORT=7860
# the port the webui binds to inside the container
| CLI_ARGS don't do anything since b1463df. Fixes #5422.
## Checklist:
- [ x] I have read the [Contributing guidelines](https://github.com/oobabooga/text-generation-webui/wiki/Contributing-guidelines). | https://api.github.com/repos/oobabooga/text-generation-webui/pulls/5726 | 2024-03-18T17:14:09Z | 2024-03-26T19:34:53Z | 2024-03-26T19:34:53Z | 2024-03-29T06:16:57Z | 167 | oobabooga/text-generation-webui | 26,794 |
[fix] fix typo change MoECheckpintIO to MoECheckpointIO | diff --git a/colossalai/accelerator/api.py b/colossalai/accelerator/api.py
index 02b3055d7380..85a19b6a61d3 100644
--- a/colossalai/accelerator/api.py
+++ b/colossalai/accelerator/api.py
@@ -40,7 +40,7 @@ def set_accelerator(accelerator: Union[str, BaseAccelerator]) -> None:
def auto_set_accelerator() -> None:
"""
Automatically check if any accelerator is available.
- If an accelerator is availabe, set it as the global accelerator.
+ If an accelerator is available, set it as the global accelerator.
"""
global _ACCELERATOR
diff --git a/colossalai/booster/plugin/gemini_plugin.py b/colossalai/booster/plugin/gemini_plugin.py
index 95b96bbfd9ed..6c503377326a 100644
--- a/colossalai/booster/plugin/gemini_plugin.py
+++ b/colossalai/booster/plugin/gemini_plugin.py
@@ -437,7 +437,7 @@ def __init__(
)
def __del__(self):
- """Destroy the prcess groups in ProcessGroupMesh"""
+ """Destroy the process groups in ProcessGroupMesh"""
self.pg_mesh.destroy_mesh_process_groups()
def support_no_sync(self) -> bool:
diff --git a/colossalai/booster/plugin/hybrid_parallel_plugin.py b/colossalai/booster/plugin/hybrid_parallel_plugin.py
index bf677e052f88..8cc76dd3e0f3 100644
--- a/colossalai/booster/plugin/hybrid_parallel_plugin.py
+++ b/colossalai/booster/plugin/hybrid_parallel_plugin.py
@@ -1067,7 +1067,7 @@ def __init__(
self.max_norm = max_norm
def __del__(self):
- """Destroy the prcess groups in ProcessGroupMesh"""
+ """Destroy the process groups in ProcessGroupMesh"""
self.pg_mesh.destroy_mesh_process_groups()
@property
diff --git a/colossalai/booster/plugin/moe_hybrid_parallel_plugin.py b/colossalai/booster/plugin/moe_hybrid_parallel_plugin.py
index 45e5a23c1b22..454710fccaa7 100644
--- a/colossalai/booster/plugin/moe_hybrid_parallel_plugin.py
+++ b/colossalai/booster/plugin/moe_hybrid_parallel_plugin.py
@@ -22,7 +22,7 @@
)
from colossalai.cluster import ProcessGroupMesh
from colossalai.interface import ModelWrapper, OptimizerWrapper
-from colossalai.moe import MOE_MANAGER, MoECheckpintIO
+from colossalai.moe import MOE_MANAGER, MoECheckpointIO
from colossalai.pipeline.schedule import OneForwardOneBackwardSchedule
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.shardformer import ShardConfig
@@ -341,9 +341,10 @@ def seed_worker(worker_id):
**_kwargs,
)
- def get_checkpoint_io(self) -> MoECheckpintIO:
+
+ def get_checkpoint_io(self) -> MoECheckpointIO:
if self.checkpoint_io is None:
- self.checkpoint_io = MoECheckpintIO(self.dp_group, self.pp_group, self.tp_group, self.zero_stage)
+ self.checkpoint_io = MoECheckpointIO(self.dp_group, self.pp_group, self.tp_group, self.zero_stage)
else:
self.checkpoint_io = self.checkpoint_io(self.dp_group, self.pp_group, self.tp_group, self.zero_stage)
return self.checkpoint_io
diff --git a/colossalai/checkpoint_io/hybrid_parallel_checkpoint_io.py b/colossalai/checkpoint_io/hybrid_parallel_checkpoint_io.py
index 36df30335dd7..80822724982e 100644
--- a/colossalai/checkpoint_io/hybrid_parallel_checkpoint_io.py
+++ b/colossalai/checkpoint_io/hybrid_parallel_checkpoint_io.py
@@ -51,7 +51,7 @@ class HybridParallelCheckpointIO(GeneralCheckpointIO):
pp_group (ProcessGroup): Process group along pipeline parallel dimension.
tp_group (ProcessGroup): Process group along tensor parallel dimension.
zero_stage (int): The zero stage of plugin. Should be in [0, 1, 2].
- verbose (bool, optional): Whether to print logging massage when saving/loading has been succesfully executed. Defaults to True.
+ verbose (bool, optional): Whether to print logging massage when saving/loading has been successfully executed. Defaults to True.
"""
def __init__(
@@ -574,7 +574,7 @@ def _get_param_id_from_optimizer_param(
for old_pg, saved_pg in zip(optimizer.optim.param_groups, saved_groups):
# obtain updated param group
new_pg = copy.deepcopy(saved_pg)
- new_pg["params"] = old_pg["params"] # The parameters in the same group shouln't change.
+ new_pg["params"] = old_pg["params"] # The parameters in the same group shouldn't change.
updated_groups.append(new_pg)
optimizer.optim.__dict__.update({"param_groups": updated_groups})
diff --git a/colossalai/moe/__init__.py b/colossalai/moe/__init__.py
index 6dd0a5fc3c52..cc33c77f3eed 100644
--- a/colossalai/moe/__init__.py
+++ b/colossalai/moe/__init__.py
@@ -1,4 +1,4 @@
-from .checkpoint import MoECheckpintIO
+from .checkpoint import MoECheckpointIO
from .experts import MLPExperts
from .layers import SparseMLP, apply_load_balance
from .manager import MOE_MANAGER
@@ -14,7 +14,7 @@
"NormalNoiseGenerator",
"UniformNoiseGenerator",
"SparseMLP",
- "MoECheckpintIO",
+ "MoECheckpointIO",
"MOE_MANAGER",
"apply_load_balance",
]
diff --git a/colossalai/moe/checkpoint.py b/colossalai/moe/checkpoint.py
index b37ffabea41f..59a0ec3f0c39 100644
--- a/colossalai/moe/checkpoint.py
+++ b/colossalai/moe/checkpoint.py
@@ -40,7 +40,7 @@
)
-class MoECheckpintIO(HybridParallelCheckpointIO):
+class MoECheckpointIO(HybridParallelCheckpointIO):
def __init__(
self,
dp_group: ProcessGroup,
@@ -373,7 +373,7 @@ def _get_param_id_from_optimizer_param(
for old_pg, saved_pg in zip(optimizer.optim.param_groups, saved_groups):
# obtain updated param group
new_pg = copy.deepcopy(saved_pg)
- new_pg["params"] = old_pg["params"] # The parameters in the same group shouln't change.
+ new_pg["params"] = old_pg["params"] # The parameters in the same group shouldn't change.
updated_groups.append(new_pg)
# ep param group
if len(optimizer.optim.param_groups) > len(saved_groups):
| ## 📌 Checklist before creating the PR
- [ ] I have created an issue for this PR for traceability
- [x] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
- [x] I have added relevant tags if possible for us to better distinguish different PRs
## 🚨 Issue number
Not involved
## 📝 What does this PR do?
fix typo change MoECheckpintIO to MoECheckpointIO
make code easy to read
has been globally replaced
detail info
modified: colossalai/accelerator/api.py
modified: colossalai/booster/plugin/gemini_plugin.py
modified: colossalai/booster/plugin/hybrid_parallel_plugin.py
modified: colossalai/booster/plugin/moe_hybrid_parallel_plugin.py
modified: colossalai/checkpoint_io/hybrid_parallel_checkpoint_io.py
modified: colossalai/moe/__init__.py
modified: colossalai/moe/checkpoint.py
modified: colossalai/shardformer/modeling/opt.py
## 💥 Checklist before requesting a review
- [ ] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
- [ ] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible
- [x] I have performed a self-review of my code
- [x] I have added thorough tests.
- [x] I have added docstrings for all the functions/methods I implemented
## ⭐️ Do you enjoy contributing to Colossal-AI?
- [x] 🌝 Yes, I do.
- [ ] 🌚 No, I don't.
Tell us more if you don't enjoy contributing to Colossal-AI.
| https://api.github.com/repos/hpcaitech/ColossalAI/pulls/5335 | 2024-01-30T06:07:43Z | 2024-03-05T13:52:30Z | 2024-03-05T13:52:30Z | 2024-03-06T01:32:19Z | 1,599 | hpcaitech/ColossalAI | 11,574 |
Added type hints and pytest tests | diff --git a/patterns/behavioral/strategy.py b/patterns/behavioral/strategy.py
index 92d11f25..595df255 100644
--- a/patterns/behavioral/strategy.py
+++ b/patterns/behavioral/strategy.py
@@ -8,41 +8,81 @@
"""
+from __future__ import annotations
+
+from typing import Callable, Type
+
+
+class DiscountStrategyValidator: # Descriptor class for check perform
+ @staticmethod
+ def validate(obj: Order, value: Callable) -> bool:
+ try:
+ if obj.price - value(obj) < 0:
+ raise ValueError(
+ f"Discount cannot be applied due to negative price resulting. {value.__name__}"
+ )
+ except ValueError as ex:
+ print(str(ex))
+ return False
+ else:
+ return True
+
+ def __set_name__(self, owner, name: str) -> None:
+ self.private_name = f"_{name}"
+
+ def __set__(self, obj: Order, value: Callable = None) -> None:
+ if value and self.validate(obj, value):
+ setattr(obj, self.private_name, value)
+ else:
+ setattr(obj, self.private_name, None)
+
+ def __get__(self, obj: object, objtype: Type = None):
+ return getattr(obj, self.private_name)
+
+
class Order:
- def __init__(self, price, discount_strategy=None):
- self.price = price
+ discount_strategy = DiscountStrategyValidator()
+
+ def __init__(self, price: float, discount_strategy: Callable = None) -> None:
+ self.price: float = price
self.discount_strategy = discount_strategy
- def price_after_discount(self):
+ def apply_discount(self) -> float:
if self.discount_strategy:
discount = self.discount_strategy(self)
else:
discount = 0
+
return self.price - discount
- def __repr__(self):
- fmt = "<Price: {}, price after discount: {}>"
- return fmt.format(self.price, self.price_after_discount())
+ def __repr__(self) -> str:
+ return f"<Order price: {self.price} with discount strategy: {getattr(self.discount_strategy,'__name__',None)}>"
-def ten_percent_discount(order):
+def ten_percent_discount(order: Order) -> float:
return order.price * 0.10
-def on_sale_discount(order):
+def on_sale_discount(order: Order) -> float:
return order.price * 0.25 + 20
def main():
"""
- >>> Order(100)
- <Price: 100, price after discount: 100>
-
- >>> Order(100, discount_strategy=ten_percent_discount)
- <Price: 100, price after discount: 90.0>
-
- >>> Order(1000, discount_strategy=on_sale_discount)
- <Price: 1000, price after discount: 730.0>
+ >>> order = Order(100, discount_strategy=ten_percent_discount)
+ >>> print(order)
+ <Order price: 100 with discount strategy: ten_percent_discount>
+ >>> print(order.apply_discount())
+ 90.0
+ >>> order = Order(100, discount_strategy=on_sale_discount)
+ >>> print(order)
+ <Order price: 100 with discount strategy: on_sale_discount>
+ >>> print(order.apply_discount())
+ 55.0
+ >>> order = Order(10, discount_strategy=on_sale_discount)
+ Discount cannot be applied due to negative price resulting. on_sale_discount
+ >>> print(order)
+ <Order price: 10 with discount strategy: None>
"""
diff --git a/patterns/structural/3-tier.py b/patterns/structural/3-tier.py
index 64835f99..ecc04243 100644
--- a/patterns/structural/3-tier.py
+++ b/patterns/structural/3-tier.py
@@ -7,7 +7,7 @@
class Data:
- """ Data Store Class """
+ """Data Store Class"""
products = {
"milk": {"price": 1.50, "quantity": 10},
@@ -22,7 +22,7 @@ def __get__(self, obj, klas):
class BusinessLogic:
- """ Business logic holding data store instances """
+ """Business logic holding data store instances"""
data = Data()
@@ -36,7 +36,7 @@ def product_information(
class Ui:
- """ UI interaction class """
+ """UI interaction class"""
def __init__(self) -> None:
self.business_logic = BusinessLogic()
diff --git a/patterns/structural/front_controller.py b/patterns/structural/front_controller.py
index 9377fefe..d93f74d6 100644
--- a/patterns/structural/front_controller.py
+++ b/patterns/structural/front_controller.py
@@ -31,7 +31,7 @@ def dispatch(self, request):
class RequestController:
- """ front controller """
+ """front controller"""
def __init__(self):
self.dispatcher = Dispatcher()
@@ -44,7 +44,7 @@ def dispatch_request(self, request):
class Request:
- """ request """
+ """request"""
mobile_type = "mobile"
tablet_type = "tablet"
diff --git a/tests/behavioral/test_strategy.py b/tests/behavioral/test_strategy.py
new file mode 100644
index 00000000..6a3b2504
--- /dev/null
+++ b/tests/behavioral/test_strategy.py
@@ -0,0 +1,52 @@
+import pytest
+
+from patterns.behavioral.strategy import Order, ten_percent_discount, on_sale_discount
+
+
[email protected]
+def order():
+ return Order(100)
+
+
[email protected](
+ "func, discount",
+ [
+ (ten_percent_discount, 10.0),
+ (on_sale_discount, 45.0)
+ ]
+)
+def test_discount_function_return(func, order, discount):
+ assert func(order) == discount
+
+
[email protected](
+ "func, price",
+ [
+ (ten_percent_discount, 100),
+ (on_sale_discount, 100)
+ ]
+)
+def test_order_discount_strategy_validate_success(func, price):
+ order = Order(price, func)
+
+ assert order.price == price
+ assert order.discount_strategy == func
+
+
+def test_order_discount_strategy_validate_error():
+ order = Order(10, discount_strategy=on_sale_discount)
+
+ assert order.discount_strategy is None
+
+
[email protected](
+ "func, price, discount",
+ [
+ (ten_percent_discount, 100, 90.0),
+ (on_sale_discount, 100, 55.0)
+ ]
+)
+def test_discount_apply_success(func, price, discount):
+ order = Order(price, func)
+
+ assert order.apply_discount() == discount
| #373
| https://api.github.com/repos/faif/python-patterns/pulls/374 | 2021-05-27T14:59:30Z | 2021-05-31T16:51:02Z | 2021-05-31T16:51:02Z | 2021-05-31T16:51:07Z | 1,584 | faif/python-patterns | 33,543 |
Update JARVIS.py | diff --git a/JARVIS/JARVIS.py b/JARVIS/JARVIS.py
index 09cf56ba16..5135086c97 100644
--- a/JARVIS/JARVIS.py
+++ b/JARVIS/JARVIS.py
@@ -167,7 +167,7 @@ def get_dict(self):
shell='powershell.exe',
paint='mspaint.exe',
cmd='cmd.exe',
- browser='C:\Program Files\Internet Explorer\iexplore.exe',
+ browser='C:\\Program Files\\Internet Explorer\\iexplore.exe',
)
return _dict
@@ -214,7 +214,7 @@ def get_app(Q):
elif Q == "open discord":
subprocess.call(["discord.exe"])
elif Q == "open browser":
- subprocess.call(["C:\Program Files\Internet Explorer\iexplore.exe"])
+ subprocess.call(["C:\\Program Files\\Internet Explorer\\iexplore.exe"])
# patch-1
elif Q == "open youtube":
webbrowser.open("https://www.youtube.com/") # open youtube
@@ -280,7 +280,8 @@ def get_app(Q):
"shell": "powershell.exe",
"paint": "mspaint.exe",
"cmd": "cmd.exe",
- "browser": "C:\Program Files\Internet Explorer\iexplore.exe",
+ "browser": "C:\\Program Files\Internet Explorer\iexplore.exe",
+ "vscode": "C:\\Users\\Users\\User\\AppData\\Local\\Programs\Microsoft VS Code"
}
# master
|
edited path | https://api.github.com/repos/geekcomputers/Python/pulls/1520 | 2022-05-29T08:11:11Z | 2022-06-04T19:11:19Z | 2022-06-04T19:11:19Z | 2022-06-04T19:11:19Z | 350 | geekcomputers/Python | 31,197 |
Unescape last message | diff --git a/modules/chat.py b/modules/chat.py
index 312f8cb807..e9c2fe7cdf 100644
--- a/modules/chat.py
+++ b/modules/chat.py
@@ -313,12 +313,12 @@ def remove_last_message(history):
else:
last = ['', '']
- return last[0], history
+ return html.unescape(last[0]), history
def send_last_reply_to_input(history):
if len(history['visible']) > 0:
- return history['visible'][-1][1]
+ return html.unescape(history['visible'][-1][1])
else:
return ''
| ## Checklist:
- [x] I have read the [Contributing guidelines](https://github.com/oobabooga/text-generation-webui/wiki/Contributing-guidelines).
![image](https://github.com/oobabooga/text-generation-webui/assets/4073789/decb9aff-0459-4698-801b-b79014467afa)
The changes in a4e903e932c6b3b43b2ccb88f9e75049b2ac4b2e, f6724a1a01f48c70a0c00cc4b2f85501b1e4f9f1, and c4733000d715e422d76f3bf58c12f596df03fc0d cause HTML-escaped strings to be sent to the input box when clicking `Remove last` or `Copy last reply`. This fixes it by unescaping them. | https://api.github.com/repos/oobabooga/text-generation-webui/pulls/3623 | 2023-08-19T05:14:42Z | 2023-08-19T12:29:08Z | 2023-08-19T12:29:08Z | 2023-08-19T20:39:59Z | 145 | oobabooga/text-generation-webui | 26,624 |
[crunchyroll] Add support for mobile URLs and use unicode literals | diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index 2b66bddbbb7..920728e01f1 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -1,4 +1,6 @@
# encoding: utf-8
+from __future__ import unicode_literals
+
import re, base64, zlib
from hashlib import sha1
from math import pow, sqrt, floor
@@ -18,29 +20,29 @@
)
class CrunchyrollIE(InfoExtractor):
- _VALID_URL = r'(?:https?://)?(?:www\.)?(?P<url>crunchyroll\.com/[^/]*/[^/?&]*?(?P<video_id>[0-9]+))(?:[/?&]|$)'
+ _VALID_URL = r'(?:https?://)?(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_TESTS = [{
- u'url': u'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
- u'file': u'645513.flv',
- #u'md5': u'b1639fd6ddfaa43788c85f6d1dddd412',
- u'info_dict': {
- u'title': u'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!',
- u'description': u'md5:2d17137920c64f2f49981a7797d275ef',
- u'thumbnail': u'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
- u'uploader': u'Yomiuri Telecasting Corporation (YTV)',
- u'upload_date': u'20131013',
+ 'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
+ 'file': '645513.flv',
+ #'md5': 'b1639fd6ddfaa43788c85f6d1dddd412',
+ 'info_dict': {
+ 'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!',
+ 'description': 'md5:2d17137920c64f2f49981a7797d275ef',
+ 'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
+ 'uploader': 'Yomiuri Telecasting Corporation (YTV)',
+ 'upload_date': '20131013',
},
- u'params': {
+ 'params': {
# rtmp
- u'skip_download': True,
+ 'skip_download': True,
},
}]
_FORMAT_IDS = {
- u'360': (u'60', u'106'),
- u'480': (u'61', u'106'),
- u'720': (u'62', u'106'),
- u'1080': (u'80', u'108'),
+ '360': ('60', '106'),
+ '480': ('61', '106'),
+ '720': ('62', '106'),
+ '1080': ('80', '108'),
}
def _decrypt_subtitles(self, data, iv, id):
@@ -63,7 +65,7 @@ def obfuscate_key(key):
num3 = key ^ num1
num4 = num3 ^ (num3 >> 3) ^ num2
prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2)))
- shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode(u'ascii')).digest())
+ shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest())
# Extend 160 Bit hash to 256 Bit
return shaHash + [0] * 12
@@ -79,93 +81,98 @@ def next_value(self):
def _convert_subtitles_to_srt(self, subtitles):
i=1
- output = u''
+ output = ''
for start, end, text in re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles):
- start = start.replace(u'.', u',')
- end = end.replace(u'.', u',')
+ start = start.replace('.', ',')
+ end = end.replace('.', ',')
text = clean_html(text)
- text = text.replace(u'\\N', u'\n')
+ text = text.replace('\\N', '\n')
if not text:
continue
- output += u'%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
+ output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
i+=1
return output
def _real_extract(self,url):
mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('video_id')
+
+ if mobj.group('prefix') == 'm':
+ mobile_webpage = self._download_webpage(url, video_id, 'Downloading mobile webpage')
+ webpage_url = self._search_regex(r'<link rel="canonical" href="([^"]+)" />', mobile_webpage, 'webpage_url')
+ else:
+ webpage_url = 'http://www.' + mobj.group('url')
- webpage_url = u'http://www.' + mobj.group('url')
- video_id = mobj.group(u'video_id')
- webpage = self._download_webpage(webpage_url, video_id)
- note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, u'trailer-notice', default=u'')
+ webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage')
+ note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, 'trailer-notice', default='')
if note_m:
raise ExtractorError(note_m)
- video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, u'video_title', flags=re.DOTALL)
- video_title = re.sub(r' {2,}', u' ', video_title)
- video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, u'video_description', default=u'')
+ video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL)
+ video_title = re.sub(r' {2,}', ' ', video_title)
+ video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='')
if not video_description:
video_description = None
- video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, u'video_upload_date', fatal=False, flags=re.DOTALL)
+ video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
if video_upload_date:
video_upload_date = unified_strdate(video_upload_date)
- video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, u'video_uploader', fatal=False, flags=re.DOTALL)
+ video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, 'video_uploader', fatal=False, flags=re.DOTALL)
- playerdata_url = compat_urllib_parse.unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, u'playerdata_url'))
+ playerdata_url = compat_urllib_parse.unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
playerdata_req = compat_urllib_request.Request(playerdata_url)
- playerdata_req.data = compat_urllib_parse.urlencode({u'current_page': webpage_url})
- playerdata_req.add_header(u'Content-Type', u'application/x-www-form-urlencoded')
- playerdata = self._download_webpage(playerdata_req, video_id, note=u'Downloading media info')
+ playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
+ playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+ playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')
- stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, u'stream_id')
- video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, u'thumbnail', fatal=False)
+ stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, 'stream_id')
+ video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, 'thumbnail', fatal=False)
formats = []
for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
stream_quality, stream_format = self._FORMAT_IDS[fmt]
- video_format = fmt+u'p'
- streamdata_req = compat_urllib_request.Request(u'http://www.crunchyroll.com/xml/')
+ video_format = fmt+'p'
+ streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
# urlencode doesn't work!
- streamdata_req.data = u'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+u'&media%5Fid='+stream_id+u'&video%5Fformat='+stream_format
- streamdata_req.add_header(u'Content-Type', u'application/x-www-form-urlencoded')
- streamdata_req.add_header(u'Content-Length', str(len(streamdata_req.data)))
- streamdata = self._download_webpage(streamdata_req, video_id, note=u'Downloading media info for '+video_format)
- video_url = self._search_regex(r'<host>([^<]+)', streamdata, u'video_url')
- video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, u'video_play_path')
+ streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
+ streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+ streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
+ streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for '+video_format)
+ video_url = self._search_regex(r'<host>([^<]+)', streamdata, 'video_url')
+ video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, 'video_play_path')
formats.append({
- u'url': video_url,
- u'play_path': video_play_path,
- u'ext': 'flv',
- u'format': video_format,
- u'format_id': video_format,
+ 'url': video_url,
+ 'play_path': video_play_path,
+ 'ext': 'flv',
+ 'format': video_format,
+ 'format_id': video_format,
})
subtitles = {}
for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
- sub_page = self._download_webpage(u'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
- video_id, note=u'Downloading subtitles for '+sub_name)
- id = self._search_regex(r'id=\'([0-9]+)', sub_page, u'subtitle_id', fatal=False)
- iv = self._search_regex(r'<iv>([^<]+)', sub_page, u'subtitle_iv', fatal=False)
- data = self._search_regex(r'<data>([^<]+)', sub_page, u'subtitle_data', fatal=False)
+ sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
+ video_id, note='Downloading subtitles for '+sub_name)
+ id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
+ iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
+ data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
if not id or not iv or not data:
continue
id = int(id)
iv = base64.b64decode(iv)
data = base64.b64decode(data)
- subtitle = self._decrypt_subtitles(data, iv, id).decode(u'utf-8')
- lang_code = self._search_regex(r'lang_code=\'([^\']+)', subtitle, u'subtitle_lang_code', fatal=False)
+ subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
+ lang_code = self._search_regex(r'lang_code=\'([^\']+)', subtitle, 'subtitle_lang_code', fatal=False)
if not lang_code:
continue
subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
return {
- u'id': video_id,
- u'title': video_title,
- u'description': video_description,
- u'thumbnail': video_thumbnail,
- u'uploader': video_uploader,
- u'upload_date': video_upload_date,
- u'subtitles': subtitles,
- u'formats': formats,
+ 'id': video_id,
+ 'title': video_title,
+ 'description': video_description,
+ 'thumbnail': video_thumbnail,
+ 'uploader': video_uploader,
+ 'upload_date': video_upload_date,
+ 'subtitles': subtitles,
+ 'formats': formats,
}
| https://api.github.com/repos/ytdl-org/youtube-dl/pulls/2273 | 2014-01-29T22:24:58Z | 2014-01-30T04:15:38Z | 2014-01-30T04:15:38Z | 2014-07-04T07:24:54Z | 3,433 | ytdl-org/youtube-dl | 49,771 |
|
Hide Grid view mapped task pagination when data is a single page | diff --git a/airflow/www/static/js/tree/Table.jsx b/airflow/www/static/js/tree/Table.jsx
index 06eb84cad46a3..aef91ce905f2e 100644
--- a/airflow/www/static/js/tree/Table.jsx
+++ b/airflow/www/static/js/tree/Table.jsx
@@ -146,6 +146,7 @@ const Table = ({
})}
</Tbody>
</ChakraTable>
+ {totalEntries > data.length && (
<Flex alignItems="center" justifyContent="flex-start" my={4}>
<IconButton
variant="ghost"
@@ -169,6 +170,7 @@ const Table = ({
{totalEntries}
</Text>
</Flex>
+ )}
</>
);
};
| Hide table pagination when all of the data fits onto a single page.
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information.
In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/main/UPDATING.md).
| https://api.github.com/repos/apache/airflow/pulls/22963 | 2022-04-12T21:19:36Z | 2022-04-12T21:49:35Z | 2022-04-12T21:49:35Z | 2022-04-26T15:17:10Z | 168 | apache/airflow | 14,263 |
Fix greedy_best_first | diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py
index 35f7ca9feeef..bb3160047e34 100644
--- a/graphs/greedy_best_first.py
+++ b/graphs/greedy_best_first.py
@@ -6,14 +6,32 @@
Path = list[tuple[int, int]]
-grid = [
- [0, 0, 0, 0, 0, 0, 0],
- [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
- [0, 0, 0, 0, 0, 0, 0],
- [0, 0, 1, 0, 0, 0, 0],
- [1, 0, 1, 0, 0, 0, 0],
- [0, 0, 0, 0, 0, 0, 0],
- [0, 0, 0, 0, 1, 0, 0],
+# 0's are free path whereas 1's are obstacles
+TEST_GRIDS = [
+ [
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 1, 0, 0, 0, 0],
+ [1, 0, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 1, 0, 0],
+ ],
+ [
+ [0, 0, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 1, 0, 1],
+ [0, 0, 0, 1, 1, 0, 0],
+ [0, 1, 0, 0, 1, 0, 0],
+ [1, 0, 0, 1, 1, 0, 1],
+ [0, 0, 0, 0, 0, 0, 0],
+ ],
+ [
+ [0, 0, 1, 0, 0],
+ [0, 1, 0, 0, 0],
+ [0, 0, 1, 0, 1],
+ [1, 0, 0, 1, 1],
+ [0, 0, 0, 0, 0],
+ ],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
@@ -65,10 +83,14 @@ def calculate_heuristic(self) -> float:
def __lt__(self, other) -> bool:
return self.f_cost < other.f_cost
+ def __eq__(self, other) -> bool:
+ return self.pos == other.pos
+
class GreedyBestFirst:
"""
- >>> gbf = GreedyBestFirst((0, 0), (len(grid) - 1, len(grid[0]) - 1))
+ >>> grid = TEST_GRIDS[2]
+ >>> gbf = GreedyBestFirst(grid, (0, 0), (len(grid) - 1, len(grid[0]) - 1))
>>> [x.pos for x in gbf.get_successors(gbf.start)]
[(1, 0), (0, 1)]
>>> (gbf.start.pos_y + delta[3][0], gbf.start.pos_x + delta[3][1])
@@ -78,11 +100,14 @@ class GreedyBestFirst:
>>> gbf.retrace_path(gbf.start)
[(0, 0)]
>>> gbf.search() # doctest: +NORMALIZE_WHITESPACE
- [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1), (5, 1), (6, 1),
- (6, 2), (6, 3), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]
+ [(0, 0), (1, 0), (2, 0), (2, 1), (3, 1), (4, 1), (4, 2), (4, 3),
+ (4, 4)]
"""
- def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
+ def __init__(
+ self, grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int]
+ ):
+ self.grid = grid
self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
@@ -114,14 +139,6 @@ def search(self) -> Path | None:
if child_node not in self.open_nodes:
self.open_nodes.append(child_node)
- else:
- # retrieve the best current path
- better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
-
- if child_node.g_cost < better_node.g_cost:
- self.open_nodes.append(child_node)
- else:
- self.open_nodes.append(better_node)
if not self.reached:
return [self.start.pos]
@@ -131,28 +148,22 @@ def get_successors(self, parent: Node) -> list[Node]:
"""
Returns a list of successors (both in the grid and free spaces)
"""
- successors = []
- for action in delta:
- pos_x = parent.pos_x + action[1]
- pos_y = parent.pos_y + action[0]
-
- if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
- continue
-
- if grid[pos_y][pos_x] != 0:
- continue
-
- successors.append(
- Node(
- pos_x,
- pos_y,
- self.target.pos_y,
- self.target.pos_x,
- parent.g_cost + 1,
- parent,
- )
+ return [
+ Node(
+ pos_x,
+ pos_y,
+ self.target.pos_x,
+ self.target.pos_y,
+ parent.g_cost + 1,
+ parent,
+ )
+ for action in delta
+ if (
+ 0 <= (pos_x := parent.pos_x + action[1]) < len(self.grid[0])
+ and 0 <= (pos_y := parent.pos_y + action[0]) < len(self.grid)
+ and self.grid[pos_y][pos_x] == 0
)
- return successors
+ ]
def retrace_path(self, node: Node | None) -> Path:
"""
@@ -168,18 +179,21 @@ def retrace_path(self, node: Node | None) -> Path:
if __name__ == "__main__":
- init = (0, 0)
- goal = (len(grid) - 1, len(grid[0]) - 1)
- for elem in grid:
- print(elem)
-
- print("------")
-
- greedy_bf = GreedyBestFirst(init, goal)
- path = greedy_bf.search()
- if path:
- for pos_x, pos_y in path:
- grid[pos_x][pos_y] = 2
+ for idx, grid in enumerate(TEST_GRIDS):
+ print(f"==grid-{idx + 1}==")
+ init = (0, 0)
+ goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
+
+ print("------")
+
+ greedy_bf = GreedyBestFirst(grid, init, goal)
+ path = greedy_bf.search()
+ if path:
+ for pos_x, pos_y in path:
+ grid[pos_x][pos_y] = 2
+
+ for elem in grid:
+ print(elem)
| ### Describe your change:
fixes: #8770
* [ ] Add an algorithm?
* [X] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [X] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [X] This pull request is all my own work -- I have not plagiarized.
* [X] I know that pull requests will not be merged if they fail the automated tests.
* [X] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [X] All new Python files are placed inside an existing directory.
* [X] All filenames are in all lowercase characters with no spaces or dashes.
* [X] All functions and variable names follow Python naming conventions.
* [X] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [X] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [X] All new algorithms include at least one URL that points to Wikipedia or another similar explanation.
* [X] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| https://api.github.com/repos/TheAlgorithms/Python/pulls/8775 | 2023-05-27T07:38:52Z | 2023-08-15T22:52:51Z | 2023-08-15T22:52:51Z | 2023-08-15T22:52:54Z | 1,995 | TheAlgorithms/Python | 29,769 |
Fix some pep8 warnings | diff --git a/letsencrypt/cli.py b/letsencrypt/cli.py
index 73dd24bdbea..0bd5f537e4e 100644
--- a/letsencrypt/cli.py
+++ b/letsencrypt/cli.py
@@ -729,11 +729,13 @@ def create_parser(plugins, args):
return helpful.parser, helpful.args
+
# For now unfortunately this constant just needs to match the code below;
# there isn't an elegant way to autogenerate it in time.
VERBS = ["run", "auth", "install", "revoke", "rollback", "config_changes", "plugins"]
HELP_TOPICS = ["all", "security", "paths", "automation", "testing"] + VERBS
+
def _create_subparsers(helpful):
subparsers = helpful.parser.add_subparsers(metavar="SUBCOMMAND")
@@ -741,7 +743,7 @@ def add_subparser(name): # pylint: disable=missing-docstring
if name == "plugins":
func = plugins_cmd
else:
- func = eval(name) # pylint: disable=eval-used
+ func = eval(name) # pylint: disable=eval-used
h = func.__doc__.splitlines()[0]
subparser = subparsers.add_parser(name, help=h, description=func.__doc__)
subparser.set_defaults(func=func)
@@ -762,22 +764,23 @@ def add_subparser(name): # pylint: disable=missing-docstring
helpful.add_group("plugins", description="Plugin options")
helpful.add("auth",
- "--csr", type=read_file, help="Path to a Certificate Signing Request (CSR) in DER format.")
+ "--csr", type=read_file,
+ help="Path to a Certificate Signing Request (CSR) in DER format.")
helpful.add("rollback",
- "--checkpoints", type=int, metavar="N",
- default=flag_default("rollback_checkpoints"),
- help="Revert configuration N number of checkpoints.")
+ "--checkpoints", type=int, metavar="N",
+ default=flag_default("rollback_checkpoints"),
+ help="Revert configuration N number of checkpoints.")
helpful.add("plugins",
- "--init", action="store_true", help="Initialize plugins.")
+ "--init", action="store_true", help="Initialize plugins.")
helpful.add("plugins",
- "--prepare", action="store_true", help="Initialize and prepare plugins.")
+ "--prepare", action="store_true", help="Initialize and prepare plugins.")
helpful.add("plugins",
- "--authenticators", action="append_const", dest="ifaces",
- const=interfaces.IAuthenticator, help="Limit to authenticator plugins only.")
+ "--authenticators", action="append_const", dest="ifaces",
+ const=interfaces.IAuthenticator, help="Limit to authenticator plugins only.")
helpful.add("plugins",
- "--installers", action="append_const", dest="ifaces",
- const=interfaces.IInstaller, help="Limit to installer plugins only.")
+ "--installers", action="append_const", dest="ifaces",
+ const=interfaces.IInstaller, help="Limit to installer plugins only.")
def _paths_parser(helpful):
diff --git a/letsencrypt/tests/cli_test.py b/letsencrypt/tests/cli_test.py
index 0a92aba6293..d0fae370d5e 100644
--- a/letsencrypt/tests/cli_test.py
+++ b/letsencrypt/tests/cli_test.py
@@ -57,7 +57,6 @@ def _call_stdout(self, args):
ret = cli.main(args)
return ret, None, stderr, client
-
def test_no_flags(self):
with mock.patch('letsencrypt.cli.run') as mock_run:
self._call([])
@@ -91,7 +90,6 @@ def test_help(self):
from letsencrypt import cli
self.assertTrue(cli.USAGE in out)
-
def test_rollback(self):
_, _, _, client = self._call(['rollback'])
self.assertEqual(1, client.rollback.call_count)
| https://api.github.com/repos/certbot/certbot/pulls/898 | 2015-10-05T01:41:15Z | 2015-10-05T20:53:02Z | 2015-10-05T20:53:02Z | 2016-05-06T19:22:23Z | 889 | certbot/certbot | 3,410 |
|
chore(ts): Convert utils/testablePose | diff --git a/src/sentry/static/sentry/app/utils/testablePose.jsx b/src/sentry/static/sentry/app/utils/testablePose.tsx
similarity index 83%
rename from src/sentry/static/sentry/app/utils/testablePose.jsx
rename to src/sentry/static/sentry/app/utils/testablePose.tsx
index 2242f2abaed79..2746eb61a88b2 100644
--- a/src/sentry/static/sentry/app/utils/testablePose.jsx
+++ b/src/sentry/static/sentry/app/utils/testablePose.tsx
@@ -1,5 +1,7 @@
/* global process */
+type PoseConfig = {[key: string]: any};
+
/**
* Use with a react-pose animation to disable the animation in testing
* environments.
@@ -7,8 +9,8 @@
* This function simply sets delays and durations to 0.
*/
const testablePose = !process.env.IS_PERCY
- ? a => a
- : function(animation) {
+ ? (a: PoseConfig) => a
+ : function(animation: PoseConfig) {
Object.keys(animation).forEach(pose => {
animation[pose].delay = 0;
animation[pose].delayChildren = 0;
| https://api.github.com/repos/getsentry/sentry/pulls/17857 | 2020-03-24T08:03:27Z | 2020-03-24T17:47:54Z | 2020-03-24T17:47:54Z | 2020-12-19T05:59:29Z | 275 | getsentry/sentry | 44,549 |
|
Check if prompt missing S/R token to prevent accidental mis-gens. | diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 42e1489c4ba..10a82dc9f24 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -27,9 +27,16 @@ def fun(p, x, xs):
def apply_prompt(p, x, xs):
+
+ orig_prompt = p.prompt
+ orig_negative_prompt = p.negative_prompt
+
p.prompt = p.prompt.replace(xs[0], x)
p.negative_prompt = p.negative_prompt.replace(xs[0], x)
+ if p.prompt == orig_prompt and p.negative_prompt == orig_negative_prompt:
+ raise RuntimeError(f"Prompt S/R did not find {xs[0]} in prompt or negative prompt. Did you forget to add the token?")
+
def apply_order(p, x, xs):
token_order = []
| This PR adds a check to the X/Y grid prompt s/r (string replace) functionality to see if the token to be replaced is actually in the prompt. If it isn't it raises an error alerting the user to check their prompt.
The motivation is that when doing rapid prompt iterating for grid comparisons, I've occasionally found that I missed putting the token to be replaced and end up with a grid of the same picture instead. For example, lets say I'm working on this prompt:
![Screenshot_20221010_164233](https://user-images.githubusercontent.com/18688190/194951855-dc5aa974-8241-470f-9d93-f446904bffca.png)
![Screenshot_20221010_164309](https://user-images.githubusercontent.com/18688190/194951883-37eee561-708b-49f4-b69a-8df8d0299ac2.png)
This works as expected:
![1664458111530-1234-A spaceship in deep-space](https://user-images.githubusercontent.com/18688190/194952982-b812ebc0-a2e9-4600-a867-3f043847e2e3.jpg)
---
Now after some time exploring finding a new prompt I'm interested in I start again:
![Screenshot_20221010_164330](https://user-images.githubusercontent.com/18688190/194953131-9af9fd59-779c-4f08-8a03-d360fa452cf4.png)
![Screenshot_20221010_164309](https://user-images.githubusercontent.com/18688190/194953194-9cf9dbab-0ef8-4b1e-acc9-5405877a0197.png)
![1664458111531-1234-A spaceship in hyper-space](https://user-images.githubusercontent.com/18688190/194953287-8e1cd1bb-d8f3-4c43-9fb0-a9b6626edd7f.jpg)
Oops, I forgot to change the start word, and have just spent several (or many) minutes generating the same picture over and over. This PR will alert me quickly to the mistake I made.
I don't know of any use-case where one would want to use Prompt S/R without the word being present, so I think throwing an error is ok. If anyone knows of such a usecase, please let me know.
| https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/2209 | 2022-10-10T21:17:07Z | 2022-10-11T10:16:57Z | 2022-10-11T10:16:57Z | 2022-10-11T11:02:23Z | 203 | AUTOMATIC1111/stable-diffusion-webui | 40,494 |
Remove subpixel upscaling option | diff --git a/lib/model/nn_blocks.py b/lib/model/nn_blocks.py
index b2d3aebe30..180d0649ee 100644
--- a/lib/model/nn_blocks.py
+++ b/lib/model/nn_blocks.py
@@ -9,7 +9,7 @@
from keras.layers.core import Activation
from keras.initializers import he_uniform, VarianceScaling
from .initializers import ICNR, ConvolutionAware
-from .layers import PixelShuffler, SubPixelUpscaling, ReflectionPadding2D
+from .layers import PixelShuffler, ReflectionPadding2D
from .normalization import InstanceNormalization
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@@ -26,10 +26,6 @@ class NNBlocks():
Parameters
----------
- use_subpixel: bool, Optional
- ``True`` if sub-pixel up-scaling layer should be used instead of pixel shuffler for
- up-scaling. This option is deprecated as sub-pixel up-scaling is Nvidia only, but is kept
- for legacy models. Default: ``False``
use_icnr_init: bool, Optional
``True`` if ICNR initialization should be used rather than the default. Default: ``False``
use_convaware_init: bool, Optional
@@ -44,18 +40,16 @@ class NNBlocks():
is being reloaded. Default: ``True``
"""
def __init__(self,
- use_subpixel=False,
use_icnr_init=False,
use_convaware_init=False,
use_reflect_padding=False,
first_run=True):
- logger.debug("Initializing %s: (use_subpixel: %s, use_icnr_init: %s, use_convaware_init: "
- "%s, use_reflect_padding: %s, first_run: %s)",
- self.__class__.__name__, use_subpixel, use_icnr_init, use_convaware_init,
+ logger.debug("Initializing %s: (use_icnr_init: %s, use_convaware_init: %s, "
+ "use_reflect_padding: %s, first_run: %s)",
+ self.__class__.__name__, use_icnr_init, use_convaware_init,
use_reflect_padding, first_run)
self.names = dict()
self.first_run = first_run
- self.use_subpixel = use_subpixel
self.use_icnr_init = use_icnr_init
self.use_convaware_init = use_convaware_init
self.use_reflect_padding = use_reflect_padding
@@ -311,11 +305,7 @@ def upscale(self, input_tensor, filters, kernel_size=3, padding="same",
var_x = InstanceNormalization(name="{}_instancenorm".format(name))(var_x)
if not res_block_follows:
var_x = LeakyReLU(0.1, name="{}_leakyrelu".format(name))(var_x)
- if self.use_subpixel:
- var_x = SubPixelUpscaling(name="{}_subpixel".format(name),
- scale_factor=scale_factor)(var_x)
- else:
- var_x = PixelShuffler(name="{}_pixelshuffler".format(name), size=scale_factor)(var_x)
+ var_x = PixelShuffler(name="{}_pixelshuffler".format(name), size=scale_factor)(var_x)
return var_x
# <<< DFaker Model Blocks >>> #
diff --git a/plugins/train/_config.py b/plugins/train/_config.py
index 0326bc7160..7ebce93dd3 100644
--- a/plugins/train/_config.py
+++ b/plugins/train/_config.py
@@ -135,13 +135,6 @@ def set_globals(self):
"\n\t Building the model will likely take several minutes as the calculations "
"for this initialization technique are expensive. This will only impact starting "
"a new model.")
- self.add_item(
- section=section, title="subpixel_upscaling", datatype=bool,
- default=False, group="network",
- info="Use subpixel upscaling rather than pixel shuffler. These techniques "
- "are both designed to produce better resolving upscaling than other "
- "methods. Each perform the same operations, but using different TF opts."
- "\n\t https://arxiv.org/pdf/1609.05158.pdf")
self.add_item(
section=section, title="reflect_padding", datatype=bool,
default=False, group="network",
diff --git a/plugins/train/model/_base.py b/plugins/train/model/_base.py
index 8fa9dc8241..2540448f0f 100644
--- a/plugins/train/model/_base.py
+++ b/plugins/train/model/_base.py
@@ -83,8 +83,7 @@ def __init__(self,
self.vram_savings.pingpong,
training_image_size)
- self.blocks = NNBlocks(use_subpixel=self.config["subpixel_upscaling"],
- use_icnr_init=self.config["icnr_init"],
+ self.blocks = NNBlocks(use_icnr_init=self.config["icnr_init"],
use_convaware_init=self.config["conv_aware_init"],
use_reflect_padding=self.config["reflect_padding"],
first_run=self.state.first_run)
@@ -377,9 +376,9 @@ def get_optimizer(self, lr=5e-5, beta_1=0.5, beta_2=0.999): # pylint: disable=i
opt_kwargs = dict(lr=lr, beta_1=beta_1, beta_2=beta_2)
if (self.config.get("clipnorm", False) and
keras.backend.backend() != "plaidml.keras.backend"):
- # NB: Clipnorm is ballooning VRAM usage, which is not expected behavior
- # and may be a bug in Keras/TF.
- # PlaidML has a bug regarding the clipnorm parameter
+ # NB: Clip-norm is ballooning VRAM usage, which is not expected behavior
+ # and may be a bug in Keras/Tensorflow.
+ # PlaidML has a bug regarding the clip-norm parameter
# See: https://github.com/plaidml/plaidml/issues/228
# Workaround by simply removing it.
# TODO: Remove this as soon it is fixed in PlaidML.
@@ -581,7 +580,6 @@ def rename_legacy(self):
self.state.inputs = {"face:0": [64, 64, 3]}
self.state.training_size = 256
self.state.config["coverage"] = 62.5
- self.state.config["subpixel_upscaling"] = False
self.state.config["reflect_padding"] = False
self.state.config["mask_type"] = None
self.state.config["mask_blur_kernel"] = 3
@@ -1014,7 +1012,7 @@ def _update_legacy_config(self):
set it to `mae`. Remove old `dssim_loss` item
* masks - If `learn_mask` does not exist then it is set to ``True`` if `mask_type` is
- not ``None`` otherwised it is set to ``False``.
+ not ``None`` otherwise it is set to ``False``.
* masks type - Replace removed masks 'dfl_full' and 'facehull' with `components` mask
diff --git a/tests/lib/model/nn_blocks_test.py b/tests/lib/model/nn_blocks_test.py
index 9c2c5c980a..9515a2dc9d 100644
--- a/tests/lib/model/nn_blocks_test.py
+++ b/tests/lib/model/nn_blocks_test.py
@@ -15,14 +15,14 @@
from lib.model.nn_blocks import NNBlocks
from lib.utils import get_backend
-_PARAMS = ["use_subpixel", "use_icnr_init", "use_convaware_init", "use_reflect_padding"]
+_PARAMS = ["use_icnr_init", "use_convaware_init", "use_reflect_padding"]
_VALUES = list(product([True, False], repeat=len(_PARAMS)))
_IDS = ["{}[{}]".format("|".join([_PARAMS[idx] for idx, b in enumerate(v) if b]),
get_backend().upper()) for v in _VALUES]
def block_test(layer_func, kwargs={}, input_shape=None):
- """Test routine for a faceswaps neural network blocks.
+ """Test routine for faceswap neural network blocks.
Tests are simple and are to ensure that the blocks compile on both tensorflow
and plaidml backends
@@ -62,13 +62,9 @@ def block_test(layer_func, kwargs={}, input_shape=None):
@pytest.mark.parametrize(_PARAMS, _VALUES, ids=_IDS)
-def test_blocks(use_subpixel, use_icnr_init, use_convaware_init, use_reflect_padding):
+def test_blocks(use_icnr_init, use_convaware_init, use_reflect_padding):
""" Test for all blocks contained within the NNBlocks Class """
- if get_backend() == "amd" and use_subpixel:
- # Subpixel upscaling does not work on plaidml so skip this test
- pytest.skip("Subpixel upscaling not supported in plaidML")
- cls_ = NNBlocks(use_subpixel=use_subpixel,
- use_icnr_init=use_icnr_init,
+ cls_ = NNBlocks(use_icnr_init=use_icnr_init,
use_convaware_init=use_convaware_init,
use_reflect_padding=use_reflect_padding)
block_test(cls_.conv2d, input_shape=(2, 5, 5, 128), kwargs=dict(filters=1024, kernel_size=3))
| This PR removes the subpixel upscaling option from training configuration.
The subpixel upscaling layer does exactly the same job as PixelShuffler, but uses Tensorflow specific ops, which means it will not work with AMD cards.
As the output is identical between the 2 layers, it makes sense to remove the backend limited version.
The layer is kept in the repo (although unselectable) so that previously saved models that used this layer can still be loaded. | https://api.github.com/repos/deepfakes/faceswap/pulls/1024 | 2020-05-13T12:03:46Z | 2020-05-13T12:50:49Z | 2020-05-13T12:50:49Z | 2020-05-13T12:52:19Z | 2,116 | deepfakes/faceswap | 18,645 |
Change link of haproxy plugin to new version | diff --git a/docs/using.rst b/docs/using.rst
index 7c1fac0039d..7764408bfbb 100644
--- a/docs/using.rst
+++ b/docs/using.rst
@@ -193,7 +193,7 @@ postfix_ N Y STARTTLS Everywhere is becoming a Certbot Postfix/Exim plu
=========== ==== ==== ===============================================================
.. _plesk: https://github.com/plesk/letsencrypt-plesk
-.. _haproxy: https://code.greenhost.net/open/letsencrypt-haproxy
+.. _haproxy: https://github.com/greenhost/certbot-haproxy
.. _s3front: https://github.com/dlapiduz/letsencrypt-s3front
.. _gandi: https://github.com/Gandi/letsencrypt-gandi
.. _icecast: https://github.com/e00E/lets-encrypt-icecast
| Greenhost has rewritten their HAProxy plugin and it's hosted on a different location. The original URL also points to this new location: https://code.greenhost.net/open/letsencrypt-haproxy | https://api.github.com/repos/certbot/certbot/pulls/3904 | 2016-12-13T11:03:32Z | 2016-12-13T21:13:56Z | 2016-12-13T21:13:56Z | 2016-12-13T21:13:56Z | 210 | certbot/certbot | 3,280 |
Cleanup RetryQuery notebook | diff --git a/docs/examples/evaluation/RetryQuery.ipynb b/docs/examples/evaluation/RetryQuery.ipynb
index 4b5255a2a46c4..863ccefdd1ac4 100644
--- a/docs/examples/evaluation/RetryQuery.ipynb
+++ b/docs/examples/evaluation/RetryQuery.ipynb
@@ -8,15 +8,22 @@
"# Retry Query Engine"
]
},
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Uncomment to add your OpenAI API key or enable debugging"
+ ]
+ },
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
- "# My OpenAI Key\n",
"import os\n",
- "os.environ['OPENAI_API_KEY'] = \"INSERT OPENAI KEY\"\n",
+ "# os.environ['OPENAI_API_KEY'] = \"INSERT OPENAI KEY\"\n",
"\n",
"# import logging\n",
"# import sys\n",
@@ -25,80 +32,188 @@
"# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))"
]
},
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First we ingest the document."
+ ]
+ },
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
+ "outputs": [],
+ "source": [
+ "from llama_index.indices.vector_store.base import VectorStoreIndex\n",
+ "from llama_index.query_engine.retry_source_query_engine import RetrySourceQueryEngine\n",
+ "from llama_index.readers.file.base import SimpleDirectoryReader\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "documents = SimpleDirectoryReader('../data/paul_graham/').load_data()\n",
+ "index = VectorStoreIndex.from_documents(documents)\n",
+ "query = \"What did the author do growing up?\""
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We will query with the default settings first."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
"outputs": [
{
- "name": "stderr",
+ "name": "stdout",
"output_type": "stream",
"text": [
- "/Users/hongyishi/Documents/GitHub/gpt_index/.venv/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
- " from .autonotebook import tqdm as notebook_tqdm\n"
+ "Default query engine response: \n",
+ "The author grew up writing essays, learning Italian, exploring Florence, painting people, working with computers, attending RISD, living in a rent-stabilized apartment, building an online store builder, editing Lisp expressions, publishing essays online, writing essays, painting still life, working on spam filters, cooking for groups, and buying a building in Cambridge.\n"
]
- },
+ }
+ ],
+ "source": [
+ "# Default query engine\n",
+ "default_query_engine = index.as_query_engine()\n",
+ "response = default_query_engine.query(query)\n",
+ "print(f\"Default query engine response: {response}\")"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now with retry, if the response does not pass the evaluator the query engine passed in will be rerun with the negative counterexample."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- "Default query engine response: \n",
- "It is not possible to answer this question with the given context information.\n",
"Query engine with retry response: \n",
- "Here is a better answer.\n",
- "\n",
- "It is not possible to determine the exact number of drivers Uber has with the given context information. However, it is known that Uber has more than 70,000 Mobility drivers in the UK and has extended offers to all eligible drivers who are not already represented by an attorney. Additionally, it is known that in October and November of 2016, outside actors downloaded the personal data of approximately 57 million Drivers and consumers worldwide.\n",
- "Query engine with retry guideline response: \n",
- "According to Uber's 2020 Annual Report, there were 3.9 million active drivers on the Uber platform worldwide as of December 31, 2020.\n"
+ "The author grew up writing essays, learning Italian, exploring Florence, painting people, working with computers, attending RISD, living in a rent-controlled apartment, building an online store builder, editing code, launching software, publishing essays online, writing essays, painting still life, working on spam filters, cooking for groups, and buying a building in Cambridge.\n"
]
}
],
"source": [
- "from llama_index.indices.vector_store.base import VectorStoreIndex\n",
- "from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine\n",
"from llama_index.query_engine.retry_query_engine import (\n",
" RetryQueryEngine,\n",
- " RetryGuidelineQueryEngine,\n",
")\n",
- "from llama_index.query_engine.retry_source_query_engine import RetrySourceQueryEngine\n",
- "from llama_index.readers.file.base import SimpleDirectoryReader\n",
"from llama_index.evaluation.base import QueryResponseEvaluator\n",
- "from llama_index.evaluation.guideline_eval import GuidelineEvaluator\n",
- "from llama_index.response.schema import Response\n",
- "from llama_index.indices.query.query_transform.feedback_transform import (\n",
- " FeedbackQueryTransformation,\n",
- ")\n",
- "from typing import cast\n",
- "\n",
- "\n",
- "uber_docs = SimpleDirectoryReader(input_files=[\"../data/10k/uber_2021.pdf\"]).load_data()\n",
- "index = VectorStoreIndex.from_documents(uber_docs)\n",
- "query = \"How many drivers does Uber have?\"\n",
- "\n",
- "# Default query engine\n",
- "default_query_engine = index.as_query_engine()\n",
- "response = default_query_engine.query(query)\n",
- "print(f\"Default query engine response: {response}\")\n",
"\n",
"# Query engine with retry\n",
"query_response_evaluator = QueryResponseEvaluator()\n",
"retry_query_engine = RetryQueryEngine(index.as_query_engine(), query_response_evaluator)\n",
"retry_response = retry_query_engine.query(query)\n",
- "print(f\"Query engine with retry response: {retry_response}\")\n",
+ "print(f\"Query engine with retry response: {retry_response}\")"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The Source Retry modifies the query source nodes by filtering the existing source nodes for the query based on llm node evaluation.\n",
+ "\n",
+ "A better solution would be to re-retrieve based on filtering out the nodes that didn't pass evaluation.\n",
"\n",
- "# retry_source_query_engine = RetrySourceQueryEngine(index.as_query_engine(), query_response_evaluator)\n",
- "# retry_source_response = retry_source_query_engine.query(query)\n",
- "# print(f\"Query engine with retry source response: {retry_source_response}\")\n",
+ "TODO"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Query engine with retry source response: \n",
+ "The author grew up writing essays, learning Italian, exploring Florence, painting people, learning about computers, attending RISD, living in a rent-stabilized apartment, building an online store builder, editing Lisp expressions, publishing essays online, writing essays, painting still life, working on spam filters, cooking for groups, and buying a building in Cambridge.\n"
+ ]
+ }
+ ],
+ "source": [
+ "retry_source_query_engine = RetrySourceQueryEngine(index.as_query_engine(), query_response_evaluator)\n",
+ "retry_source_response = retry_source_query_engine.query(query)\n",
+ "print(f\"Query engine with retry source response: {retry_source_response}\") "
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This module tries to use guidelines to direct the evaluator's behavior. You can customize your own guidelines."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Guideline eval evaluation result: The response is too long and should be summarized. It should also include specific numbers or statistics when possible.\n",
+ "Transformed query: Here is a previous bad answer.\n",
+ "\n",
+ "The author grew up writing essays, learning Italian, exploring Florence, painting people, working with computers, attending RISD, living in a rent-stabilized apartment, building an online store builder, editing Lisp expressions, publishing essays online, writing essays, painting still life, working on spam filters, cooking for groups, and buying a building in Cambridge.\n",
+ "Here is some feedback from the evaluator about the response given.\n",
+ "The response is too long and should be summarized. It should also include specific numbers or statistics when possible.\n",
+ "Now answer the question.\n",
+ "\n",
+ "What experiences did the author have growing up?\n",
+ "Query engine with retry guideline response: \n",
+ "The author had a wide range of experiences growing up. They wrote essays, explored Florence, painted people, worked with computers, attended RISD, lived in a rent-stabilized apartment, built an online store builder, edited Lisp expressions, published essays online, painted still life, worked on spam filters, cooked for groups, and bought a building in Cambridge. They also learned Italian and had a deep understanding of the web and its implications, which enabled them to recognize the potential of publishing essays online and take advantage of the new opportunities it presented.\n"
+ ]
+ }
+ ],
+ "source": [
+ "from llama_index.evaluation.guideline_eval import GuidelineEvaluator, DEFAULT_GUIDELINES\n",
+ "from llama_index.response.schema import Response\n",
+ "from llama_index.indices.query.query_transform.feedback_transform import (\n",
+ " FeedbackQueryTransformation,\n",
+ ")\n",
+ "from llama_index.query_engine.retry_query_engine import (\n",
+ " RetryGuidelineQueryEngine,\n",
+ ")\n",
"\n",
"# Guideline eval\n",
- "guideline_eval = GuidelineEvaluator()\n",
- "# typed_response = response if isinstance(response, Response) else response.get_response()\n",
- "# eval = guideline_eval.evaluate_response(query, typed_response)\n",
- "# print(eval)\n",
- "# feedback_query_transform = FeedbackQueryTransformation(evaluation=eval, resynthesize_query=True)\n",
- "# transformed_query = feedback_query_transform.run(query)\n",
- "# print(transformed_query)\n",
- "retry_guideline_query_engine = RetryGuidelineQueryEngine(index.as_query_engine(), guideline_eval, resynthesize_query=True)\n",
+ "guideline_eval = GuidelineEvaluator(\n",
+ " guidelines=DEFAULT_GUIDELINES\n",
+ " + \"\\nThe response should not be overly long.\\n\"\n",
+ " \"The response should try to summarize where possible.\\n\"\n",
+ ") # just for example\n",
+ "typed_response = response if isinstance(response, Response) else response.get_response()\n",
+ "eval = guideline_eval.evaluate_response(query, typed_response)\n",
+ "print(f\"Guideline eval evaluation result: {eval.feedback}\")\n",
+ "feedback_query_transform = FeedbackQueryTransformation(resynthesize_query=True)\n",
+ "transformed_query = feedback_query_transform.run(query, {\"evaluation\": eval})\n",
+ "print(f\"Transformed query: {transformed_query.query_str}\")\n",
+ "retry_guideline_query_engine = RetryGuidelineQueryEngine(\n",
+ " index.as_query_engine(), guideline_eval, resynthesize_query=True\n",
+ ")\n",
"retry_guideline_response = retry_guideline_query_engine.query(query)\n",
"print(f\"Query engine with retry guideline response: {retry_guideline_response}\")"
]
| # Description
Refactored the Retry Query notebook to make the different implementation variations clearer.
## Type of Change
Notebook update
# How Has This Been Tested?
Notebook run
# Suggested Checklist:
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
| https://api.github.com/repos/run-llama/llama_index/pulls/6381 | 2023-06-09T18:38:27Z | 2023-06-11T08:28:08Z | 2023-06-11T08:28:08Z | 2023-06-11T08:28:08Z | 3,088 | run-llama/llama_index | 6,109 |
reduce wind power further for heuristic landing | diff --git a/gym/envs/box2d/lunar_lander.py b/gym/envs/box2d/lunar_lander.py
index c76646247ff..1107bb64298 100644
--- a/gym/envs/box2d/lunar_lander.py
+++ b/gym/envs/box2d/lunar_lander.py
@@ -703,9 +703,6 @@ def heuristic(env, s):
def demo_heuristic_lander(env, seed=None, render=False):
- # wind power must be reduced for heuristic landing
- env.wind_power = 0.2
-
total_reward = 0
steps = 0
s = env.reset(seed=seed)
diff --git a/tests/envs/test_lunar_lander.py b/tests/envs/test_lunar_lander.py
index b15645859d9..5aeab73f594 100644
--- a/tests/envs/test_lunar_lander.py
+++ b/tests/envs/test_lunar_lander.py
@@ -18,16 +18,6 @@ def test_lunar_lander_continuous():
_test_lander(LunarLander(continuous=True), seed=0)
[email protected](Box2D is None, reason="Box2D not installed")
-def test_lunar_lander_wind():
- _test_lander(LunarLander(enable_wind=True), seed=0)
-
-
[email protected](Box2D is None, reason="Box2D not installed")
-def test_lunar_lander_wind_continuous():
- _test_lander(LunarLander(continuous=True, enable_wind=True), seed=0)
-
-
@pytest.mark.skipif(Box2D is None, reason="Box2D not installed")
def _test_lander(env, seed=None, render=False):
total_reward = demo_heuristic_lander(env, seed=seed, render=render)
| This addresses #2763
| https://api.github.com/repos/openai/gym/pulls/2766 | 2022-04-20T16:51:00Z | 2022-04-21T14:53:41Z | 2022-04-21T14:53:40Z | 2022-04-21T14:53:41Z | 413 | openai/gym | 5,620 |
Updated the check of empty array. | diff --git a/keras/engine/saving.py b/keras/engine/saving.py
index c1027daea43..3a332281ab1 100644
--- a/keras/engine/saving.py
+++ b/keras/engine/saving.py
@@ -249,7 +249,7 @@ def convert_custom_objects(obj):
for name in layer_names:
layer_weights = model_weights_group[name]
weight_names = layer_weights['weight_names']
- if weight_names:
+ if len(weight_names) > 0:
filtered_layer_names.append(name)
layer_names = filtered_layer_names
| ### Summary
We have a lot of warning on travis which go like this:
```
/home/travis/build/keras-team/keras/keras/engine/saving.py:252: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.
if weight_names:
```
### Related Issues
### PR Overview
- [ ] This PR requires new unit tests [y/n] (make sure tests are included)
- [ ] This PR requires to update the documentation [y/n] (make sure the docs are up-to-date)
- [x] This PR is backwards compatible [y/n]
- [ ] This PR changes the current API [y/n] (all API changes need to be approved by fchollet)
| https://api.github.com/repos/keras-team/keras/pulls/11669 | 2018-11-18T16:49:03Z | 2018-11-18T18:33:40Z | 2018-11-18T18:33:40Z | 2018-11-18T22:32:31Z | 134 | keras-team/keras | 47,203 |
add Logic Inference Dataset | diff --git a/data/datasets/__init__.py b/data/datasets/__init__.py
index fb573c7d6c..f337023897 100644
--- a/data/datasets/__init__.py
+++ b/data/datasets/__init__.py
@@ -20,6 +20,7 @@
"tell_a_joke": "mikegarts/oa_tell_a_joke_20000",
"oa_wiki_qa_bart_10000row": "michaelthwan/oa_wiki_qa_bart_10000row",
"oa_leet10k": "ehartford/oa_leet10k",
+ "LogicInference_OA": "KK04/LogicInference_OA",
}
SAFETY_DATASETS = {
diff --git a/data/datasets/logicreference_OA/README.md b/data/datasets/logicreference_OA/README.md
new file mode 100644
index 0000000000..ee639f09c6
--- /dev/null
+++ b/data/datasets/logicreference_OA/README.md
@@ -0,0 +1,97 @@
+# LogicInference Dataset
+
+This repository contains the Python code used to generate the `LogicInference`
+dataset. `LogicInference` is a dataset designed to evaluate the ability of
+models to perform logical inference. The dataset focuses on inference using
+propositional logic and a small subset of first-order logic, represented both in
+semi-formal logical notation, and in natural language. `LogicInference` has two
+main long-term goals: (1) to evaluate the ability of models to perform logical
+inference, and the degree to which inference chains are real or hallucinated,
+and (2) to assess whether learning logical inference abilities in the abstract
+(e.g., getting better in this dataset) would then transfer to other real-world
+tasks.
+
+**Note: to run this code you also need the other files from the original
+LogicInference project
+[here](https://github.com/google-research/google-research/tree/master/logic_inference_dataset).
+The generate_dataset script in this directory is a drop-in replacement for the
+original generate_dataset script which outputs data in Open Assistant instruct
+format.**
+
+For a detailed description of the dataset, please check the following paper:
+https://openreview.net/pdf?id=HAGeIS_Lcg9 (arXiv preprint:
+https://arxiv.org/abs/2203.15099 )
+
+Please cite as:
+
+```
+@inproceedings{ontanon2022logicinference,
+ url = {https://openreview.net/pdf?id=HAGeIS_Lcg9},
+ author = {Onta\~{n}\'{o}n, Santiago and Ainslie, Joshua and Cvicek, Vaclav and Fisher, Zachary},
+ title = {{LogicInference}: A New Dataset for Teaching Logical Inference to seq2seq Models},
+ booktitle={Proceedings of ICLR 2022 workshop on Objects, Structure and Causality},
+ year={2022}
+}
+```
+
+This is an re-produce of the dataset from LogicInference Dataset in paper:
+https://openreview.net/pdf?id=HAGeIS_Lcg9.
+
+The github page of LogicInference Dataset:
+https://github.com/google-research/google-research/tree/master/logic_inference_dataset.
+
+This dataset is aimed to offer more dataset for Open Assistant project,
+depending on their demands, there three columns: INSTRUCTION, RESPONSE, SOURCE.
+
+The results in this dataset is a little different from which was introduced in
+the original paper:
+
+1.For all three splits (IID/OOD/length), only IID is used. In the original
+paper, it seems that model can reach better performance with data generated by
+this split method.
+
+2.In the original paper, there are two form of responses:
+LOGICINFERENCE<sub>b</sub> (with the answer at the beginning) and
+LOGICINFERENCE<sub>e</sub> (with the answer at the end). This dataset uses
+LOGICINFERENCE<sub>e</sub>, that means: for all questions, the model will first
+do logic inference, and give the final answer at the end.
+
+3.The original paper, some parameters in generate_dataset.py are:
+
+N_INFERENCE_PROBLEMS = 5000
+
+N_VARIATIONS = 25
+
+N_EXAMPLES = 200000
+
+TRAIN_RATIO = 0.9
+
+LENGTH_SPLIT_THRESHOLD = 4
+
+RANDOM_SEED = 0
+
+I choose some new parameters:
+
+N_INFERENCE_PROBLEMS = 10000
+
+N_VARIATIONS = 25
+
+N_EXAMPLES = 55000
+
+TRAIN_RATIO = 1
+
+LENGTH_SPLIT_THRESHOLD = 4
+
+RANDOM_SEED = 1111
+
+The original script generated 4814 different inference problems and extended all
+those inference problems to around 200,000 Q-A pairs. My settings generated 5491
+different inference problems and extended them to around 54,607
+Instruction-Response pairs. I think for Open Assistant projects, maybe the
+number of different inference problems is more important, and generated many
+similar Instruction-Response pairs will only add training time and doesn't make
+much sense.
+
+4.I only keep the generate_dataset.py file in this directory, because the coding
+format of the original project does not fit OA project which need flake8 format.
+I only change the coding format of generate_dateset.py.
diff --git a/data/datasets/logicreference_OA/generate_dataset.py b/data/datasets/logicreference_OA/generate_dataset.py
new file mode 100644
index 0000000000..59ff45ff39
--- /dev/null
+++ b/data/datasets/logicreference_OA/generate_dataset.py
@@ -0,0 +1,129 @@
+# Copyright 2023 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Driver file that generates IID/OOD/length splits.
+"""
+
+
+import os
+import random
+
+import rules
+import splits
+import tensorflow as tf
+from absl import app
+
+# Generation parameters:
+# TARGET_FOLDER = "/path/to/generate/dataset/"
+TARGET_FOLDER = "./e_txt/"
+ANSWER_AT_THE_END = True
+LENGTH_DISTRIBUTION = [0.425, 0.3, 0.2, 0.05, 0.025]
+N_INFERENCE_PROBLEMS = 10000
+N_VARIATIONS = 25
+N_EXAMPLES = 55000
+TRAIN_RATIO = 1
+LENGTH_SPLIT_THRESHOLD = 4
+RANDOM_SEED = 1111
+
+
+def create_string_feature(values):
+ """Creates TensorFlow string features.
+
+ Args:
+ values: A sequence of unicode strings.
+
+ Returns:
+ An entry of int tf.train.Feature.
+ """
+ # Converts to `str` (in Python 2) and `bytes` (in Python 3) as
+ # `tf.train.Feature` only takes bytes.
+ values = [value.encode("utf-8") for value in values]
+
+ feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
+ return feature
+
+
+def generate_t5_split(path, file_name, examples):
+ print(f"Generating split of size {len(examples)} at {path}")
+ os.makedirs(path, exist_ok=True)
+ with open(os.path.join(path, file_name), "w") as f:
+ for example in examples:
+ f.write(f"INSTRUCTION: {example.inputs}\n")
+ f.write(f"RESPONSE: {example.targets}\n")
+ f.write("SOURCE: LogicInference Dataset e\n\n")
+
+
+def main(_):
+ rules.precompute_rules()
+
+ suffix = ""
+ if ANSWER_AT_THE_END:
+ suffix = "_e"
+ folder_iid_name = "logic_inference_iid" + suffix
+
+ # Generate each of the splits:
+ print("IID:")
+ random.seed(RANDOM_SEED)
+ (train_examples, test_examples) = splits.generate_training_and_test_sets_iid(
+ N_INFERENCE_PROBLEMS,
+ N_VARIATIONS,
+ N_EXAMPLES,
+ TRAIN_RATIO,
+ length_distribution=LENGTH_DISTRIBUTION,
+ answer_at_the_end=ANSWER_AT_THE_END,
+ )
+ generate_t5_split(
+ os.path.join(TARGET_FOLDER, folder_iid_name),
+ f"{folder_iid_name}-train_tf_examples-00000-of-00001",
+ train_examples,
+ )
+ generate_t5_split(
+ os.path.join(TARGET_FOLDER, folder_iid_name),
+ f"{folder_iid_name}-test_tf_examples-00000-of-00001",
+ test_examples,
+ )
+
+ # print("OOD:")
+ # random.seed(RANDOM_SEED)
+ # (train_examples, test_examples) = splits.generate_training_and_test_sets_ood(
+ # N_INFERENCE_PROBLEMS, N_VARIATIONS, N_EXAMPLES, TRAIN_RATIO,
+ # length_distribution=LENGTH_DISTRIBUTION,
+ # answer_at_the_end=ANSWER_AT_THE_END)
+ # generate_t5_split(os.path.join(TARGET_FOLDER, folder_ood_name),
+ # f"{folder_ood_name}-train_tf_examples-00000-of-00001",
+ # train_examples)
+ # generate_t5_split(os.path.join(TARGET_FOLDER, folder_ood_name),
+ # f"{folder_ood_name}-test_tf_examples-00000-of-00001",
+ # test_examples)
+ #
+ # print("Length:")
+ # random.seed(RANDOM_SEED)
+ # (train_examples,
+ # test_examples) = splits.generate_training_and_test_sets_length(
+ # N_INFERENCE_PROBLEMS,
+ # N_VARIATIONS,
+ # N_EXAMPLES,
+ # LENGTH_SPLIT_THRESHOLD,
+ # length_distribution=LENGTH_DISTRIBUTION,
+ # answer_at_the_end=ANSWER_AT_THE_END)
+ # generate_t5_split(
+ # os.path.join(TARGET_FOLDER, folder_length_name),
+ # f"{folder_length_name}-train_tf_examples-00000-of-00001", train_examples)
+ # generate_t5_split(
+ # os.path.join(TARGET_FOLDER, folder_length_name),
+ # f"{folder_length_name}-test_tf_examples-00000-of-00001", test_examples)
+
+
+if __name__ == "__main__":
+ app.run(main)
diff --git a/data/datasets/logicreference_OA/requirements.txt b/data/datasets/logicreference_OA/requirements.txt
new file mode 100644
index 0000000000..5261ed7c0e
--- /dev/null
+++ b/data/datasets/logicreference_OA/requirements.txt
@@ -0,0 +1,2 @@
+absl_py>=0.13.0
+tensorflow>=2.6.0
| Regarding https://github.com/LAION-AI/Open-Assistant/issues/261.
This is an re-produce of the dataset from LogicInference Dataset in paper: https://openreview.net/pdf?id=HAGeIS_Lcg9. I think it will helpful for improving logic inference ability of the model.
The github page of LogicInference Dataset: https://github.com/google-research/google-research/tree/master/logic_inference_dataset.
This dataset is aimed to offer more dataset for Open Assistant project, depending on their demands, there three columns: INSTRUCTION, RESPONSE, SOURCE.
The results in this dataset is a little different from which was introduced in the original paper:
1.For all three splits (IID/OOD/length), only IID is used. In the original paper, it seems that model can reach better performance with data generated by this split method.
2.In the original paper, there are two form of responses: LOGICINFERENCEb (with the answer at the beginning) and LOGICINFERENCEe (with the answer at the end). This dataset uses LOGICINFERENCEe, that means: for all questions, the model will first do logic inference, and give the final answer at the end.
3.The original paper, some parameters in generate_dataset.py are:
N_INFERENCE_PROBLEMS = 5000
N_VARIATIONS = 25
N_EXAMPLES = 200000
TRAIN_RATIO = 0.9
LENGTH_SPLIT_THRESHOLD = 4
RANDOM_SEED = 0
I choose some new parameters:
N_INFERENCE_PROBLEMS = 10000
N_VARIATIONS = 25
N_EXAMPLES = 55000
TRAIN_RATIO = 1
LENGTH_SPLIT_THRESHOLD = 4
RANDOM_SEED = 1111
The original script generated 4814 different inference problems and extended all those inference problems to around 200,000 Q-A pairs. My settings generated 5491 different inference problems and extended them to around 54,607 Instruction-Response pairs. I think for Open Assistant projects, maybe the number of different inference problems is more important, and generated many similar Instruction-Response pairs will only add training time and doesn't make much sense. | https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2337 | 2023-04-05T17:16:15Z | 2023-04-11T07:38:40Z | 2023-04-11T07:38:40Z | 2023-04-11T07:38:41Z | 2,611 | LAION-AI/Open-Assistant | 36,891 |
Don't ruin the complexity of your interface with checking. | diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md
index 7ccb8a2c7..ef03b1102 100644
--- a/CppCoreGuidelines.md
+++ b/CppCoreGuidelines.md
@@ -749,7 +749,7 @@ The date is validated twice (by the `Date` constructor) and passed as a characte
##### Example
Excess checking can be costly.
-There are cases where checking early is dumb because you may not ever need the value, or may only need part of the value that is more easily checked than the whole.
+There are cases where checking early is dumb because you may not ever need the value, or may only need part of the value that is more easily checked than the whole. Similarly, don't add validity checks that change the asymptotic behavior of your interface (e.g., don't add a `O(n)` check to an interface with an average complexity of `O(1)`).
class Jet { // Physics says: e*e < x*x + y*y + z*z
| I'm not sure if this is redundant with the existing text "or may only need part of the value that is more easily checked than the whole." (A more aggressive change would excise that clause.)
| https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/394 | 2015-11-17T21:24:42Z | 2015-11-18T21:43:16Z | 2015-11-18T21:43:16Z | 2015-12-01T20:09:01Z | 233 | isocpp/CppCoreGuidelines | 15,930 |
Fix typos | diff --git a/README.md b/README.md
index 3d690be89..5d42295c9 100644
--- a/README.md
+++ b/README.md
@@ -74,7 +74,7 @@ To contribute to diagram, check out [contribution guidelines](CONTRIBUTING.md).
[GitPitch](https://gitpitch.com/) is a markdown presentation service for developers. Diagrams is now integrated as [Cloud Diagram Widget](https://gitpitch.com/docs/diagram-features/cloud-diagrams/) of GitPitch, so you can use the Diagrams when to create slide decks for Tech Conferences, Meetups, and Training with GitPitch.
-[Cloudiscovery](https://github.com/Cloud-Architects/cloudiscovery) helps you to analyze resources in your cloud (AWS/GCP/Azure/Alibaba/IBM) account. It allows you to create a diagram of analyzed cloud resource map based on this Diagrams library, so you can draw the your existing cloud infratructure with Cloudicovery.
+[Cloudiscovery](https://github.com/Cloud-Architects/cloudiscovery) helps you to analyze resources in your cloud (AWS/GCP/Azure/Alibaba/IBM) account. It allows you to create a diagram of analyzed cloud resource map based on this Diagrams library, so you can draw the your existing cloud infrastructure with Cloudicovery.
[Airflow Diagrams](https://github.com/feluelle/airflow-diagrams) is an Airflow plugin that aims to easily visualise your Airflow DAGs on service level from providers like AWS, GCP, Azure, etc. via diagrams.
| https://api.github.com/repos/mingrammer/diagrams/pulls/310 | 2020-10-01T10:50:18Z | 2020-10-05T14:00:45Z | 2020-10-05T14:00:45Z | 2020-10-05T14:00:45Z | 353 | mingrammer/diagrams | 52,703 |
|
COMPAT: np.full not available in all versions, xref #16773 | diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index e157ae16e71f9..5fe96d70fc16f 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -163,7 +163,9 @@ def _init_dict(self, data, index, columns, dtype=None):
# TODO: figure out how to handle this case, all nan's?
# add in any other columns we want to have (completeness)
- nan_arr = sp_maker(np.full(len(index), np.nan))
+ nan_arr = np.empty(len(index), dtype='float64')
+ nan_arr.fill(np.nan)
+ nan_arr = sp_maker(nan_arr)
sdict.update((c, nan_arr) for c in columns if c not in sdict)
return to_manager(sdict, columns, index)
| https://api.github.com/repos/pandas-dev/pandas/pulls/17000 | 2017-07-17T23:34:16Z | 2017-07-18T01:31:43Z | 2017-07-18T01:31:43Z | 2017-07-18T01:31:43Z | 203 | pandas-dev/pandas | 45,467 |
|
Readability improvements to singly_linked_list.py | diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py
index 16436ff90274..73b982316e76 100644
--- a/data_structures/linked_list/singly_linked_list.py
+++ b/data_structures/linked_list/singly_linked_list.py
@@ -6,56 +6,56 @@ def __init__(self, data):
class Linked_List:
def __init__(self):
- self.Head = None # Initialize Head to None
+ self.head = None # Initialize head to None
def insert_tail(self, data):
- if self.Head is None:
+ if self.head is None:
self.insert_head(data) # If this is first node, call insert_head
else:
- temp = self.Head
+ temp = self.head
while temp.next != None: # traverse to last node
temp = temp.next
temp.next = Node(data) # create node & link to tail
def insert_head(self, data):
newNod = Node(data) # create a new node
- if self.Head != None:
- newNod.next = self.Head # link newNode to head
- self.Head = newNod # make NewNode as Head
+ if self.head != None:
+ newNod.next = self.head # link newNode to head
+ self.head = newNod # make NewNode as head
def printList(self): # print every node data
- tamp = self.Head
- while tamp is not None:
- print(tamp.data)
- tamp = tamp.next
+ temp = self.head
+ while temp is not None:
+ print(temp.data)
+ temp = temp.next
def delete_head(self): # delete from head
- temp = self.Head
- if self.Head != None:
- self.Head = self.Head.next
+ temp = self.head
+ if self.head != None:
+ self.head = self.head.next
temp.next = None
return temp
def delete_tail(self): # delete from tail
- tamp = self.Head
- if self.Head != None:
- if self.Head.next is None: # if Head is the only Node in the Linked List
- self.Head = None
+ temp = self.head
+ if self.head != None:
+ if self.head.next is None: # if head is the only Node in the Linked List
+ self.head = None
else:
- while tamp.next.next is not None: # find the 2nd last element
- tamp = tamp.next
- tamp.next, tamp = (
+ while temp.next.next is not None: # find the 2nd last element
+ temp = temp.next
+ temp.next, temp = (
None,
- tamp.next,
- ) # (2nd last element).next = None and tamp = last element
- return tamp
+ temp.next,
+ ) # (2nd last element).next = None and temp = last element
+ return temp
def isEmpty(self):
- return self.Head is None # Return if Head is none
+ return self.head is None # Return if head is none
def reverse(self):
prev = None
- current = self.Head
+ current = self.head
while current:
# Store the current node's next node.
@@ -67,15 +67,15 @@ def reverse(self):
# Make the current node the next node (to progress iteration)
current = next_node
# Return prev in order to put the head at the end
- self.Head = prev
+ self.head = prev
def main():
A = Linked_List()
- print("Inserting 1st at Head")
+ print("Inserting 1st at head")
a1 = input()
A.insert_head(a1)
- print("Inserting 2nd at Head")
+ print("Inserting 2nd at head")
a2 = input()
A.insert_head(a2)
print("\nPrint List : ")
@@ -88,7 +88,7 @@ def main():
A.insert_tail(a4)
print("\nPrint List : ")
A.printList()
- print("\nDelete Head")
+ print("\nDelete head")
A.delete_head()
print("Delete Tail")
A.delete_tail()
| Fixes #1397
I have changed the "Head" to "head"(making it more Pythonic) and also "tamp" to "temp"(Better readability) in singly linked list(under data structures)
PS: I'm a newbie at opensource , so if I have done something wrong please do tell. I wish to learn as much as possible
Thanks | https://api.github.com/repos/TheAlgorithms/Python/pulls/1403 | 2019-10-19T13:42:55Z | 2019-10-19T18:14:38Z | 2019-10-19T18:14:38Z | 2019-10-19T18:14:38Z | 997 | TheAlgorithms/Python | 29,835 |
kucoin2 parseTrade fix | diff --git a/js/kucoin2.js b/js/kucoin2.js
index 69488b32a62b..c6be7e31eafe 100644
--- a/js/kucoin2.js
+++ b/js/kucoin2.js
@@ -830,7 +830,7 @@ module.exports = class kucoin2 extends Exchange {
const fee = {
'cost': this.safeFloat (trade, 'fee'),
'rate': this.safeFloat (trade, 'feeRate'),
- 'feeCurrency': this.safeString (trade, 'feeCurrency'),
+ 'currency': this.safeString (trade, 'feeCurrency'),
};
const type = this.safeString (trade, 'type');
let cost = this.safeFloat (trade, 'funds');
| https://api.github.com/repos/ccxt/ccxt/pulls/4736 | 2019-02-23T11:54:02Z | 2019-02-23T12:49:44Z | 2019-02-23T12:49:44Z | 2019-02-23T12:49:44Z | 171 | ccxt/ccxt | 13,878 |
|
Change build status links to travis-ci.com. | diff --git a/README.rst b/README.rst
index 62681c7eb47..f555812689f 100644
--- a/README.rst
+++ b/README.rst
@@ -99,8 +99,8 @@ ACME working area in github: https://github.com/ietf-wg-acme/acme
|build-status| |coverage| |docs| |container|
-.. |build-status| image:: https://travis-ci.org/certbot/certbot.svg?branch=master
- :target: https://travis-ci.org/certbot/certbot
+.. |build-status| image:: https://travis-ci.com/certbot/certbot.svg?branch=master
+ :target: https://travis-ci.com/certbot/certbot
:alt: Travis CI status
.. |coverage| image:: https://codecov.io/gh/certbot/certbot/branch/master/graph/badge.svg
| As part of migrating away from the now deprecated support for GitHub services, our Travis config has moved from travis-ci.org to travis-ci.com. This PR updates the links to our build status to use travis-ci.com.
The reference to travis-ci.org at https://github.com/certbot/certbot/blob/651de2dd2fea9c79caae96d039f2570518735bdc/certbot/configuration.py#L98 wasn't updated because build history from travis-ci.org isn't migrated over by Travis yet. Once this has happened, Travis says it will automatically redirect links to travis-ci.org to travis-ci.com. See https://docs.travis-ci.com/user/open-source-repository-migration for more info. | https://api.github.com/repos/certbot/certbot/pulls/6656 | 2019-01-14T18:01:25Z | 2019-01-14T22:06:53Z | 2019-01-14T22:06:53Z | 2019-01-14T22:06:57Z | 214 | certbot/certbot | 2,874 |
Huobi fetchPosition edits | diff --git a/js/huobi.js b/js/huobi.js
index 003ad2f6e746..41a9ddbbdc07 100644
--- a/js/huobi.js
+++ b/js/huobi.js
@@ -5661,51 +5661,120 @@ module.exports = class huobi extends Exchange {
'cross': 'contractPrivatePostLinearSwapApiV1SwapCrossAccountPositionInfo',
});
//
+ // isolated
+ //
// {
- // status: 'ok',
- // data: [
- // {
- // positions: [
+ // "status": "ok",
+ // "data": [
// {
- // symbol: 'BTC',
- // contract_code: 'BTC-USDT',
- // volume: 1,
- // available: 1,
- // frozen: 0,
- // cost_open: 47027.1,
- // cost_hold: 47324.4,
- // profit_unreal: 0.1705,
- // profit_rate: -0.269631765513927,
- // lever_rate: 100,
- // position_margin: 0.471539,
- // direction: 'sell',
- // profit: -0.1268,
- // last_price: 47153.9,
- // margin_asset: 'USDT',
- // margin_mode: 'isolated',
- // margin_account: 'BTC-USDT'
- // }
- // ],
- // symbol: 'BTC',
- // margin_balance: 8.01274699,
- // margin_position: 0.471539,
- // margin_frozen: 0,
- // margin_available: 7.54120799,
- // profit_real: 0,
- // profit_unreal: 0.1705,
- // risk_rate: 16.442755615124092,
- // withdraw_available: 7.37070799,
- // liquidation_price: 54864.89009448036,
- // lever_rate: 100,
- // adjust_factor: 0.55,
- // margin_static: 7.84224699,
- // contract_code: 'BTC-USDT',
- // margin_asset: 'USDT',
- // margin_mode: 'isolated',
- // margin_account: 'BTC-USDT'
- // }
- // ],
- // ts: 1641162539767
+ // "positions": [],
+ // "symbol": "BTC",
+ // "margin_balance": 1.949728350000000000,
+ // "margin_position": 0,
+ // "margin_frozen": 0E-18,
+ // "margin_available": 1.949728350000000000,
+ // "profit_real": -0.050271650000000000,
+ // "profit_unreal": 0,
+ // "risk_rate": null,
+ // "withdraw_available": 1.949728350000000000,
+ // "liquidation_price": null,
+ // "lever_rate": 20,
+ // "adjust_factor": 0.150000000000000000,
+ // "margin_static": 1.949728350000000000,
+ // "contract_code": "BTC-USDT",
+ // "margin_asset": "USDT",
+ // "margin_mode": "isolated",
+ // "margin_account": "BTC-USDT",
+ // "trade_partition": "USDT",
+ // "position_mode": "dual_side"
+ // },
+ // ... opposite side position can be present here too (if hedge)
+ // ],
+ // "ts": 1653605008286
+ // }
+ //
+ // cross
+ //
+ // {
+ // "status": "ok",
+ // "data": {
+ // "positions": [
+ // {
+ // "symbol": "BTC",
+ // "contract_code": "BTC-USDT",
+ // "volume": "1.000000000000000000",
+ // "available": "1.000000000000000000",
+ // "frozen": "0E-18",
+ // "cost_open": "29530.000000000000000000",
+ // "cost_hold": "29530.000000000000000000",
+ // "profit_unreal": "-0.010000000000000000",
+ // "profit_rate": "-0.016931933626820200",
+ // "lever_rate": "50",
+ // "position_margin": "0.590400000000000000",
+ // "direction": "buy",
+ // "profit": "-0.010000000000000000",
+ // "last_price": "29520",
+ // "margin_asset": "USDT",
+ // "margin_mode": "cross",
+ // "margin_account": "USDT",
+ // "contract_type": "swap",
+ // "pair": "BTC-USDT",
+ // "business_type": "swap",
+ // "trade_partition": "USDT",
+ // "position_mode": "dual_side"
+ // },
+ // ... opposite side position can be present here too (if hedge)
+ // ],
+ // "futures_contract_detail": [
+ // {
+ // "symbol": "BTC",
+ // "contract_code": "BTC-USDT-220624",
+ // "margin_position": "0",
+ // "margin_frozen": "0E-18",
+ // "margin_available": "1.497799766913531118",
+ // "profit_unreal": "0",
+ // "liquidation_price": null,
+ // "lever_rate": "30",
+ // "adjust_factor": "0.250000000000000000",
+ // "contract_type": "quarter",
+ // "pair": "BTC-USDT",
+ // "business_type": "futures",
+ // "trade_partition": "USDT"
+ // },
+ // ... other items listed with different expiration (contract_code)
+ // ],
+ // "margin_mode": "cross",
+ // "margin_account": "USDT",
+ // "margin_asset": "USDT",
+ // "margin_balance": "2.088199766913531118",
+ // "margin_static": "2.098199766913531118",
+ // "margin_position": "0.590400000000000000",
+ // "margin_frozen": "0E-18",
+ // "profit_real": "-0.016972710000000000",
+ // "profit_unreal": "-0.010000000000000000",
+ // "withdraw_available": "1.497799766913531118",
+ // "risk_rate": "9.105496355562965147",
+ // "contract_detail": [
+ // {
+ // "symbol": "BTC",
+ // "contract_code": "BTC-USDT",
+ // "margin_position": "0.590400000000000000",
+ // "margin_frozen": "0E-18",
+ // "margin_available": "1.497799766913531118",
+ // "profit_unreal": "-0.010000000000000000",
+ // "liquidation_price": "27625.176468365024050352",
+ // "lever_rate": "50",
+ // "adjust_factor": "0.350000000000000000",
+ // "contract_type": "swap",
+ // "pair": "BTC-USDT",
+ // "business_type": "swap",
+ // "trade_partition": "USDT"
+ // },
+ // ... all symbols listed
+ // ],
+ // "position_mode": "dual_side"
+ // },
+ // "ts": "1653604697466"
// }
//
} else {
@@ -5713,121 +5782,74 @@ module.exports = class huobi extends Exchange {
'future': 'contractPrivatePostApiV1ContractAccountPositionInfo',
'swap': 'contractPrivatePostSwapApiV1SwapAccountPositionInfo',
});
- // future
+ //
+ // future, swap
+ //
// {
- // status: 'ok',
- // data: [
+ // "status": "ok",
+ // "data": [
// {
- // symbol: 'BTC',
- // contract_code: 'BTC-USD',
- // margin_balance: 0.000752347253890835,
- // margin_position: 0.000705870726835087,
- // margin_frozen: 0,
- // margin_available: 0.000046476527055748,
- // profit_real: 0,
- // profit_unreal: -0.000004546248622,
- // risk_rate: 1.0508428311146076,
- // withdraw_available: 0.000046476527055748,
- // liquidation_price: 35017.91655851386,
- // lever_rate: 3,
- // adjust_factor: 0.015,
- // margin_static: 0.000756893502512835,
- // positions: [
- // {
- // symbol: 'BTC',
- // contract_code: 'BTC-USD',
- // volume: 1,
- // available: 1,
- // frozen: 0,
- // cost_open: 47150.000000000015,
- // cost_hold: 47324.6,
- // profit_unreal: -0.000004546248622,
- // profit_rate: 0.00463757067530574,
- // lever_rate: 3,
- // position_margin: 0.000705870726835087,
- // direction: 'buy',
- // profit: 0.0000032785936199,
- // last_price: 47223
- // }
- // ]
+ // "symbol": "XRP",
+ // "contract_code": "XRP-USD", // only present in swap
+ // "margin_balance": 12.186361450698276582,
+ // "margin_position": 5.036261079774375503,
+ // "margin_frozen": 0E-18,
+ // "margin_available": 7.150100370923901079,
+ // "profit_real": -0.012672343876723438,
+ // "profit_unreal": 0.163382354575000020,
+ // "risk_rate": 2.344723929650649798,
+ // "withdraw_available": 6.986718016348901059,
+ // "liquidation_price": 0.271625200493799547,
+ // "lever_rate": 5,
+ // "adjust_factor": 0.075000000000000000,
+ // "margin_static": 12.022979096123276562,
+ // "positions": [
+ // {
+ // "symbol": "XRP",
+ // "contract_code": "XRP-USD",
+ // // "contract_type": "this_week", // only present in future
+ // "volume": 1.0,
+ // "available": 1.0,
+ // "frozen": 0E-18,
+ // "cost_open": 0.394560000000000000,
+ // "cost_hold": 0.394560000000000000,
+ // "profit_unreal": 0.163382354575000020,
+ // "profit_rate": 0.032232070910556005,
+ // "lever_rate": 5,
+ // "position_margin": 5.036261079774375503,
+ // "direction": "buy",
+ // "profit": 0.163382354575000020,
+ // "last_price": 0.39712
+ // },
+ // ... opposite side position can be present here too (if hedge)
+ // ]
// }
// ],
- // ts: 1641162795228
+ // "ts": 1653600470199
// }
//
- // swap
+ // cross usdt swap
+ //
// {
- // status: 'ok',
- // data: [
- // {
- // positions: [
- // {
- // symbol: 'BTC',
- // contract_code: 'BTC-USDT',
- // volume: 1,
- // available: 1,
- // frozen: 0,
- // cost_open: 47027.1,
- // cost_hold: 47324.4,
- // profit_unreal: 0.1705,
- // profit_rate: -0.269631765513927,
- // lever_rate: 100,
- // position_margin: 0.471539,
- // direction: 'sell',
- // profit: -0.1268,
- // last_price: 47153.9,
- // margin_asset: 'USDT',
- // margin_mode: 'isolated',
- // margin_account: 'BTC-USDT'
- // }
- // ],
- // symbol: 'BTC',
- // margin_balance: 8.01274699,
- // margin_position: 0.471539,
- // margin_frozen: 0,
- // margin_available: 7.54120799,
- // profit_real: 0,
- // profit_unreal: 0.1705,
- // risk_rate: 16.442755615124092,
- // withdraw_available: 7.37070799,
- // liquidation_price: 54864.89009448036,
- // lever_rate: 100,
- // adjust_factor: 0.55,
- // margin_static: 7.84224699,
- // contract_code: 'BTC-USDT',
- // margin_asset: 'USDT',
- // margin_mode: 'isolated',
- // margin_account: 'BTC-USDT'
- // }
- // ],
- // ts: 1641162539767
+ // "status":"ok",
+ // "data":{
+ // "positions":[],
+ // "futures_contract_detail":[]
+ // "margin_mode":"cross",
+ // "margin_account":"USDT",
+ // "margin_asset":"USDT",
+ // "margin_balance":"1.000000000000000000",
+ // "margin_static":"1.000000000000000000",
+ // "margin_position":"0",
+ // "margin_frozen":"1.000000000000000000",
+ // "profit_real":"0E-18",
+ // "profit_unreal":"0",
+ // "withdraw_available":"0",
+ // "risk_rate":"15.666666666666666666",
+ // "contract_detail":[]
+ // },
+ // "ts":"1645521118946"
// }
- // cross usdt swap
- // {
- // "status":"ok",
- // "data":{
- // "positions":[
- // ],
- // "futures_contract_detail":[
- // (...)
- // ]
- // "margin_mode":"cross",
- // "margin_account":"USDT",
- // "margin_asset":"USDT",
- // "margin_balance":"1.000000000000000000",
- // "margin_static":"1.000000000000000000",
- // "margin_position":"0",
- // "margin_frozen":"1.000000000000000000",
- // "profit_real":"0E-18",
- // "profit_unreal":"0",
- // "withdraw_available":"0",
- // "risk_rate":"15.666666666666666666",
- // "contract_detail":[
- // (...)
- // ]
- // },
- // "ts":"1645521118946"
- // }
//
}
const request = {};
| this PR only includes updated (correct) examples now for all 4 combination/endpoints. we will make use of it when making next update for huobi's `fP/s` methods. | https://api.github.com/repos/ccxt/ccxt/pulls/13451 | 2022-05-26T22:57:07Z | 2022-05-28T00:57:12Z | 2022-05-28T00:57:12Z | 2022-06-04T21:58:40Z | 3,974 | ccxt/ccxt | 13,449 |
[generic] Unescape webpage contents | diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 6e632477921..0d02f836e12 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -145,6 +145,17 @@ class GenericIE(InfoExtractor):
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
}
},
+ # nowvideo embed hidden behind percent encoding
+ {
+ 'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
+ 'md5': '2baf4ddd70f697d94b1c18cf796d5107',
+ 'info_dict': {
+ 'id': '06e53103ca9aa',
+ 'ext': 'flv',
+ 'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
+ 'description': 'No description',
+ },
+ }
]
def report_download_webpage(self, video_id):
@@ -291,6 +302,11 @@ def _real_extract(self, url):
except compat_xml_parse_error:
pass
+ # Sometimes embedded video player is hidden behind percent encoding
+ # (e.g. https://github.com/rg3/youtube-dl/issues/2448)
+ # Unescaping the whole page allows to handle those cases in a generic way
+ webpage = compat_urllib_parse.unquote(webpage)
+
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
| This would allow handling generic webpages that hide embedded video iframe behind percent encoding (e.g. #2448).
```
document.write(unescape("%3c%69%66%72%61%6d%65%20%73%74%79%6c%65%3d%22%6f%76%65%72%66%6c%6f%77%3a%20%68%69%64%64%65%6e%3b%20%62%6f%72%64%65%72%3a%20%30%3b%20%77%69%64%74%68%3a%20%37%35%30%70%78%3b%20%68%65%69%67%68%74%3a%20%34%31%30%70%78%22%20%73%72%63%3d%22%68%74%74%70%3a%2f%2f%65%6d%62%65%64%2e%6e%6f%76%61%6d%6f%76%2e%63%6f%6d%2f%65%6d%62%65%64%2e%70%68%70%3f%77%69%64%74%68%3d%37%35%30%26%68%65%69%67%68%74%3d%34%31%30%26%76%3d%65%30%35%66%70%6c%70%63%74%7a%39%70%6b%26%70%78%3d%31%22%20%73%63%72%6f%6c%6c%69%6e%67%3d%22%6e%6f%22%3e%3c%2f%69%66%72%61%6d%65%3e"));
```
```
<iframe style="overflow: hidden; border: 0; width: 750px; height: 410px" src="http://embed.novamov.com/embed.php?width=750&height=410&v=e05fplpctz9pk&px=1" scrolling="no"></iframe>
```
Alternatively, we can do second pass using unescaped webpage after first (non-unescaped webpage) found nothing.
Not sure if this breaks something.
| https://api.github.com/repos/ytdl-org/youtube-dl/pulls/2451 | 2014-02-24T16:56:32Z | 2014-03-21T21:18:29Z | 2014-03-21T21:18:29Z | 2014-06-18T10:08:46Z | 397 | ytdl-org/youtube-dl | 50,532 |
Updated checkpoint support for Sagemaker Model Parallel | diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index ad221e1d88197..43a1d750f33c7 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -18,6 +18,7 @@
import contextlib
import functools
+import glob
import inspect
import math
import os
@@ -1302,7 +1303,7 @@ def train(
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
- if resume_from_checkpoint is not None:
+ if resume_from_checkpoint is not None and not is_sagemaker_mp_enabled():
self._load_from_checkpoint(resume_from_checkpoint)
# If model was re-initialized, put it on the right device and update self.model_wrapped
@@ -1401,6 +1402,9 @@ def _inner_training_loop(
model = self._wrap_model(self.model_wrapped)
+ if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None:
+ self._load_from_checkpoint(resume_from_checkpoint, model)
+
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
@@ -1666,6 +1670,8 @@ def _inner_training_loop(
xm.rendezvous("load_best_model_at_end")
elif args.local_rank != -1:
dist.barrier()
+ elif is_sagemaker_mp_enabled():
+ smp.barrier()
self._load_best_model()
@@ -1688,7 +1694,12 @@ def _inner_training_loop(
return TrainOutput(self.state.global_step, train_loss, metrics)
- def _load_from_checkpoint(self, resume_from_checkpoint):
+ def _load_from_checkpoint(self, resume_from_checkpoint, model=None):
+
+ if model is None:
+ model = self.model
+ strict_load = is_sagemaker_mp_enabled()
+
if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)) and not os.path.isfile(
os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME)
):
@@ -1713,20 +1724,22 @@ def _load_from_checkpoint(self, resume_from_checkpoint):
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
# If the model is on the GPU, it still works!
- load_result = self.model.load_state_dict(state_dict, strict=False)
- self._issue_warnings_after_load(load_result)
-
+ load_result = model.load_state_dict(state_dict, strict=strict_load)
+ if not strict_load:
+ self._issue_warnings_after_load(load_result)
# release memory
del state_dict
else:
# We load the sharded checkpoint
- load_result = load_sharded_checkpoint(self.model, resume_from_checkpoint, strict=False)
- self._issue_warnings_after_load(load_result)
+ load_result = load_sharded_checkpoint(model, resume_from_checkpoint, strict=strict_load)
+ if not strict_load:
+ self._issue_warnings_after_load(load_result)
def _load_best_model(self):
logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).")
-
best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
+ strict_load = is_sagemaker_mp_enabled()
+ model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
if os.path.exists(best_model_path):
if self.deepspeed:
# temp hack until Deepspeed fixes the problem with resume from an existing engine that did some stepping
@@ -1743,12 +1756,13 @@ def _load_best_model(self):
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(best_model_path, map_location="cpu")
# If the model is on the GPU, it still works!
- load_result = self.model.load_state_dict(state_dict, strict=False)
- self._issue_warnings_after_load(load_result)
+ load_result = model.load_state_dict(state_dict, strict=strict_load)
+ if not strict_load:
+ self._issue_warnings_after_load(load_result)
elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)):
- # Best model is a sharded checkpoint
- load_result = load_sharded_checkpoint(self.model, self.state.best_model_checkpoint, strict=False)
- self._issue_warnings_after_load(load_result)
+ load_result = load_sharded_checkpoint(model, self.state.best_model_checkpoint, strict=strict_load)
+ if not strict_load:
+ self._issue_warnings_after_load(load_result)
else:
logger.warning(
f"Could not locate the best model at {best_model_path}, if you are running a distributed training "
@@ -1886,17 +1900,21 @@ def _save_checkpoint(self, model, trial, metrics=None):
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
reissue_pt_warnings(caught_warnings)
elif is_sagemaker_mp_enabled():
- if smp.rdp_rank() == 0:
- # Consolidate the state dict on all processed of rdp_rank 0
- opt_state_dict = self.optimizer.state_dict()
- # Save it and the scheduler on the main process
- if self.args.should_save:
- torch.save(opt_state_dict, os.path.join(output_dir, OPTIMIZER_NAME))
- with warnings.catch_warnings(record=True) as caught_warnings:
- torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
- reissue_pt_warnings(caught_warnings)
- if self.do_grad_scaling:
- torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
+ opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False)
+ smp.barrier()
+ if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state:
+ smp.save(
+ opt_state_dict,
+ os.path.join(output_dir, OPTIMIZER_NAME),
+ partial=True,
+ v3=smp.state.cfg.shard_optimizer_state,
+ )
+ if self.args.should_save:
+ with warnings.catch_warnings(record=True) as caught_warnings:
+ torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
+ reissue_pt_warnings(caught_warnings)
+ if self.do_grad_scaling:
+ torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
elif self.args.should_save and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
@@ -1945,6 +1963,7 @@ def _save_checkpoint(self, model, trial, metrics=None):
# A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
# not yet exist.
os.makedirs(output_dir, exist_ok=True)
+
local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
if local_rank == -1:
torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
@@ -1967,9 +1986,12 @@ def _load_optimizer_and_scheduler(self, checkpoint):
# deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
return
- if os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) and os.path.isfile(
- os.path.join(checkpoint, SCHEDULER_NAME)
- ):
+ checkpoint_file_exists = (
+ glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*")
+ if is_sagemaker_mp_enabled()
+ else os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME))
+ )
+ if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
@@ -1985,9 +2007,16 @@ def _load_optimizer_and_scheduler(self, checkpoint):
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
- self.optimizer.load_state_dict(
- torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location)
- )
+ if is_sagemaker_mp_enabled():
+
+ def opt_load_hook(mod, opt):
+ opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
+
+ self.model_wrapped.register_post_step_hook(opt_load_hook)
+ else:
+ self.optimizer.load_state_dict(
+ torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location)
+ )
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME)))
reissue_pt_warnings(caught_warnings)
| # What does this PR do?
<!--
Congratulations! You've made it this far! You're not quite done yet though.
Once merged, your PR is going to appear in the release notes with the title you set, so make sure it's a great title that fully reflects the extent of your awesome contribution.
Then, please replace this with a description of the change and which issue is fixed (if applicable). Please also include relevant motivation and context. List any dependencies (if any) that are required for this change.
Once you're done, someone will review your PR shortly (see the section "Who can review?" below to tag some potential reviewers). They may suggest changes to make the code even better. If no one reviewed your PR after a week has passed, don't hesitate to post a new comment @-mentioning the same persons---sometimes notifications get lost.
-->
<!-- Remove if not applicable -->
This PR updates SMP checkpoint support. With these changes SMP optimizer state checkpoints will be saved partially while SMP model weights will be saved in full. Since weights are saved in full, checkpoint behavior will be compatible with `save_pretrained` and `shard_checkpoint`.
- Uses `local_state_dict()` with partial optimizer state saving.
- Uses `smp.save` optimizer state saving for SMP.
- Uses `smp.load `when loading optimizer state saving for SMP.
- Reorders weight loading to happen after wrapping of model for SMP.
- Updated checks for the existence of optimizer checkpoint files since smp partial checkpoints contain postfixes in addition to filename(example: `filename_0_0` or `filename_0_0_0`).
- adds `load_best_model_at_end` support for SMP
This PR is created based on the feedback from [previous PR on partial checkpoint support for SMP:](https://github.com/huggingface/transformers/pull/16950)
## Before submitting
- [x] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [x] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests),
Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the [forum](https://discuss.huggingface.co/)? Please add a link
to it if that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the
[documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and
[here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?
## Who can review?
Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
members/contributors who may be interested in your PR.
<!-- Your PR will be replied to more quickly if you can figure out the right person to tag with @
If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
Please tag fewer than 3 people.
Models:
- albert, bert, xlm: @LysandreJik
- blenderbot, bart, marian, pegasus, encoderdecoder, t5: @patrickvonplaten, @patil-suraj
- longformer, reformer, transfoxl, xlnet: @patrickvonplaten
- fsmt: @stas00
- funnel: @sgugger
- gpt2: @patrickvonplaten, @LysandreJik
- rag: @patrickvonplaten, @lhoestq
- tensorflow: @LysandreJik
Library:
- benchmarks: @patrickvonplaten
- deepspeed: @stas00
- ray/raytune: @richardliaw, @amogkam
- text generation: @patrickvonplaten
- tokenizers: @n1t0, @LysandreJik
- trainer: @sgugger
- pipelines: @LysandreJik
Documentation: @sgugger
HF projects:
- datasets: [different repo](https://github.com/huggingface/datasets)
- rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)
Examples:
- maintained examples (not research project or legacy): @sgugger, @patil-suraj
- research_projects/bert-loses-patience: @JetRunner
- research_projects/distillation: @VictorSanh
-->
| https://api.github.com/repos/huggingface/transformers/pulls/17219 | 2022-05-12T21:31:51Z | 2022-05-16T12:17:26Z | 2022-05-16T12:17:26Z | 2022-05-16T12:25:47Z | 2,093 | huggingface/transformers | 12,096 |
tell users to use -t py310 | diff --git a/CHANGES.md b/CHANGES.md
index e5f4a1fdf82..dae7df7c9d5 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -4,6 +4,7 @@
### _Black_
+- Point users to using `--target-version py310` if we detect 3.10-only syntax (#2668)
- Cell magics are now only processed if they are known Python cell magics. Earlier, all
cell magics were tokenized, leading to possible indentation errors e.g. with
`%%writefile`. (#2630)
diff --git a/src/black/parsing.py b/src/black/parsing.py
index e38405637cd..825c50eb6d5 100644
--- a/src/black/parsing.py
+++ b/src/black/parsing.py
@@ -42,6 +42,11 @@
ast3 = ast27 = ast
+PY310_HINT: Final[
+ str
+] = "Consider using --target-version py310 to parse Python 3.10 code."
+
+
class InvalidInput(ValueError):
"""Raised when input source code fails all parse attempts."""
@@ -95,7 +100,8 @@ def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -
if not src_txt.endswith("\n"):
src_txt += "\n"
- for grammar in get_grammars(set(target_versions)):
+ grammars = get_grammars(set(target_versions))
+ for grammar in grammars:
drv = driver.Driver(grammar)
try:
result = drv.parse_string(src_txt, True)
@@ -110,6 +116,12 @@ def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -
faulty_line = "<line number missing in source>"
exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}")
else:
+ if pygram.python_grammar_soft_keywords not in grammars and matches_grammar(
+ src_txt, pygram.python_grammar_soft_keywords
+ ):
+ original_msg = exc.args[0]
+ msg = f"{original_msg}\n{PY310_HINT}"
+ raise InvalidInput(msg) from None
raise exc from None
if isinstance(result, Leaf):
@@ -117,6 +129,16 @@ def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -
return result
+def matches_grammar(src_txt: str, grammar: Grammar) -> bool:
+ drv = driver.Driver(grammar)
+ try:
+ drv.parse_string(src_txt, True)
+ except ParseError:
+ return False
+ else:
+ return True
+
+
def lib2to3_unparse(node: Node) -> str:
"""Given a lib2to3 node, return its string representation."""
code = str(node)
diff --git a/tests/test_format.py b/tests/test_format.py
index d44be1e8712..30099aaf1bc 100644
--- a/tests/test_format.py
+++ b/tests/test_format.py
@@ -210,6 +210,15 @@ def test_patma_invalid() -> None:
exc_info.match("Cannot parse: 10:11")
+def test_patma_hint() -> None:
+ source, expected = read_data("pattern_matching_simple")
+ mode = black.Mode(target_versions={black.TargetVersion.PY39})
+ with pytest.raises(black.parsing.InvalidInput) as exc_info:
+ assert_format(source, expected, mode, minimum_version=(3, 10))
+
+ exc_info.match(black.parsing.PY310_HINT)
+
+
def test_docstring_no_string_normalization() -> None:
"""Like test_docstring but with string normalization off."""
source, expected = read_data("docstring_no_string_normalization")
| https://api.github.com/repos/psf/black/pulls/2668 | 2021-12-03T22:11:54Z | 2021-12-04T23:30:23Z | 2021-12-04T23:30:23Z | 2021-12-04T23:31:08Z | 856 | psf/black | 24,108 |
|
Fix st.cache | diff --git a/lib/streamlit/caching.py b/lib/streamlit/caching.py
index 6e842516ba6f..833602e03a92 100644
--- a/lib/streamlit/caching.py
+++ b/lib/streamlit/caching.py
@@ -28,9 +28,9 @@
import threading
import time
from collections import namedtuple
-from typing import Any, Dict
+from typing import Any, Dict, Optional
-from cachetools import LRUCache, TTLCache
+from cachetools import TTLCache
import streamlit as st
from streamlit.util import functools_wraps
@@ -56,12 +56,10 @@
LOGGER = get_logger(__name__)
-# The timer function we use with TTLCache.
+# The timer function we use with TTLCache. This is the default timer func, but
+# is exposed here as a constant so that it can be patched in unit tests.
TTLCACHE_TIMER = time.monotonic
-# A list of all mem-caches we've created.
-_all_mem_caches = []
-
class CacheError(Exception):
pass
@@ -80,6 +78,64 @@ def __init__(self, cached_value):
DiskCacheEntry = namedtuple("DiskCacheEntry", ["value"])
+class _MemCaches(object):
+ """Manages all in-memory st.cache caches"""
+
+ def __init__(self):
+ # Contains a cache object for each st.cache'd function
+ self._lock = threading.RLock()
+ self._function_caches = {} # type: Dict[str, TTLCache]
+
+ def get_cache(
+ self, key: str, max_entries: Optional[float], ttl: Optional[float]
+ ) -> TTLCache:
+ """Return the mem cache for the given key.
+
+ If it doesn't exist, create a new one with the given params.
+ """
+
+ if max_entries is None:
+ max_entries = math.inf
+ if ttl is None:
+ ttl = math.inf
+
+ if not isinstance(max_entries, (int, float)):
+ raise RuntimeError("max_entries must be an int")
+ if not isinstance(ttl, (int, float)):
+ raise RuntimeError("ttl must be a float")
+
+ # Get the existing cache, if it exists, and validate that its params
+ # haven't changed.
+ with self._lock:
+ mem_cache = self._function_caches.get(key)
+ if (
+ mem_cache is not None
+ and mem_cache.ttl == ttl
+ and mem_cache.maxsize == max_entries
+ ):
+ return mem_cache
+
+ # Create a new cache object and put it in our dict
+ LOGGER.debug(
+ "Creating new mem_cache (key=%s, max_entries=%s, ttl=%s)",
+ key,
+ max_entries,
+ ttl,
+ )
+ mem_cache = TTLCache(maxsize=max_entries, ttl=ttl, timer=TTLCACHE_TIMER)
+ self._function_caches[key] = mem_cache
+ return mem_cache
+
+ def clear(self) -> None:
+ """Clear all caches"""
+ with self._lock:
+ self._function_caches = {}
+
+
+# Our singleton _MemCaches instance
+_mem_caches = _MemCaches()
+
+
# A thread-local counter that's incremented when we enter @st.cache
# and decremented when we exit.
class ThreadLocalCacheInfo(threading.local):
@@ -206,34 +262,6 @@ def _get_mutated_output_error_message():
return message
-def _create_mem_cache(max_entries, ttl):
- """Create an in-memory cache object with the given parameters."""
- if max_entries is None and ttl is None:
- # If we have no max_entries or TTL, we can just use a regular dict.
- mem_cache = {}
- else:
- if max_entries is None:
- max_entries = math.inf
- elif not isinstance(max_entries, int):
- raise Exception("`max_entries` must be an int or None")
-
- if not isinstance(ttl, (float, int)) and ttl is not None:
- raise Exception("`ttl` must be a float or None")
-
- # If ttl is none, just create an LRUCache. (TTLCache is simply an
- # LRUCache that adds a ttl option.)
- if ttl is None:
- mem_cache = LRUCache(maxsize=max_entries)
- else:
- mem_cache = TTLCache(maxsize=max_entries, ttl=ttl, timer=TTLCACHE_TIMER)
-
- # Stick the new cache in our global list
- global _all_mem_caches
- _all_mem_caches.append(mem_cache)
-
- return mem_cache
-
-
def _read_from_mem_cache(mem_cache, key, allow_output_mutation, hash_funcs):
if key in mem_cache:
entry = mem_cache[key]
@@ -445,8 +473,36 @@ def cache(
ttl=ttl,
)
- # Create the function's in-memory cache.
- mem_cache = _create_mem_cache(max_entries, ttl)
+ # Create the unique key for this function's cache. The cache will be
+ # retrieved from inside the wrapped function.
+ #
+ # A naive implementation would involve simply creating the cache object
+ # right here in the wrapper, which in a normal Python script would be
+ # executed only once. But in Streamlit, we reload all modules related to a
+ # user's app when the app is re-run, which means that - among other
+ # things - all function decorators in the app will be re-run, and so any
+ # decorator-local objects will be recreated.
+ #
+ # Furthermore, our caches can be destroyed and recreated (in response
+ # to cache clearing, for example), which means that retrieving the
+ # function's cache here (so that the wrapped function can save a lookup)
+ # is incorrect: the cache itself may be recreated between
+ # decorator-evaluation time and decorated-function-execution time. So
+ # we must retrieve the cache object *and* perform the cached-value lookup
+ # inside the decorated function.
+
+ func_hasher = CodeHasher("md5", None, hash_funcs)
+ # Include the function's module and qualified name in the hash.
+ # This means that two identical functions in different modules
+ # will not share a hash; it also means that two identical *nested*
+ # functions in the same module will not share a hash.
+ func_hasher.update(func.__module__)
+ func_hasher.update(func.__qualname__)
+ func_hasher.update(func)
+ cache_key = func_hasher.hexdigest()
+ LOGGER.debug(
+ "mem_cache key for %s.%s: %s", func.__module__, func.__qualname__, cache_key
+ )
@functools_wraps(func)
def wrapped_func(*args, **kwargs):
@@ -458,7 +514,7 @@ def wrapped_func(*args, **kwargs):
LOGGER.debug("Purposefully skipping cache")
return func(*args, **kwargs)
- name = func.__name__
+ name = func.__qualname__
if len(args) == 0 and len(kwargs) == 0:
message = "Running %s()." % name
@@ -466,23 +522,34 @@ def wrapped_func(*args, **kwargs):
message = "Running %s(...)." % name
def get_or_create_cached_value():
- hasher = hashlib.new("md5")
-
- args_hasher = CodeHasher("md5", hasher, hash_funcs)
+ # First, get the cache that's attached to this function.
+ # This cache's key is generated (above) from the function's code.
+ global _mem_caches
+ mem_cache = _mem_caches.get_cache(cache_key, max_entries, ttl)
+
+ # Next, calculate the key for the value we'll be searching for
+ # within that cache. This key is generated from both the function's
+ # code and the arguments that are passed into it. (Even though this
+ # key is used to index into a per-function cache, it must be
+ # globally unique, because it is *also* used for a global on-disk
+ # cache that is *not* per-function.)
+ value_hasher = hashlib.new("md5")
+
+ args_hasher = CodeHasher("md5", value_hasher, hash_funcs)
args_hasher.update([args, kwargs])
LOGGER.debug("Hashing arguments to %s of %i bytes.", name, args_hasher.size)
- code_hasher = CodeHasher("md5", hasher, hash_funcs)
+ code_hasher = CodeHasher("md5", value_hasher, hash_funcs)
code_hasher.update(func)
LOGGER.debug("Hashing function %s in %i bytes.", name, code_hasher.size)
- key = hasher.hexdigest()
- LOGGER.debug("Cache key: %s", key)
+ value_key = value_hasher.hexdigest()
+ LOGGER.debug("Cache key: %s", value_key)
try:
return_value = _read_from_cache(
mem_cache=mem_cache,
- key=key,
+ key=value_key,
persisted=persist,
allow_output_mutation=allow_output_mutation,
hash_funcs=hash_funcs,
@@ -500,7 +567,7 @@ def get_or_create_cached_value():
_write_to_cache(
mem_cache=mem_cache,
- key=key,
+ key=value_key,
value=return_value,
persist=persist,
allow_output_mutation=allow_output_mutation,
@@ -556,7 +623,7 @@ class Cache(Dict[Any, Any]):
def __init__(self, persist=False, allow_output_mutation=False):
self._persist = persist
self._allow_output_mutation = allow_output_mutation
- self._mem_cache = _create_mem_cache(None, None)
+ self._mem_cache = {}
dict.__init__(self)
@@ -686,7 +753,5 @@ def _clear_disk_cache():
def _clear_mem_cache():
- global _all_mem_caches
- # Copy _all_mem_caches to guard against threading errors
- for mem_cache in list(_all_mem_caches):
- mem_cache.clear()
+ global _mem_caches
+ _mem_caches.clear()
diff --git a/lib/tests/streamlit/scriptrunner/ScriptRunner_test.py b/lib/tests/streamlit/scriptrunner/ScriptRunner_test.py
index c88e57b662b1..8a9a7f08313f 100644
--- a/lib/tests/streamlit/scriptrunner/ScriptRunner_test.py
+++ b/lib/tests/streamlit/scriptrunner/ScriptRunner_test.py
@@ -314,6 +314,36 @@ def test_multiple_scriptrunners(self):
],
)
+ def test_rerun_caching(self):
+ """Test that st.caches are maintained across script runs."""
+
+ # Run st_cache_script.
+ runner = TestScriptRunner("st_cache_script.py")
+ runner.enqueue_rerun()
+ runner.start()
+ runner.join()
+
+ # The script has 4 cached functions, each of which writes out
+ # the same text.
+ self._assert_text_deltas(
+ runner,
+ [
+ "cached function called",
+ "cached function called",
+ "cached function called",
+ "cached function called",
+ ],
+ )
+
+ # Re-run the script on a second runner.
+ runner = TestScriptRunner("st_cache_script.py")
+ runner.enqueue_rerun()
+ runner.start()
+ runner.join()
+
+ # The cached functions should not have been called on this second run
+ self._assert_text_deltas(runner, [])
+
def _assert_no_exceptions(self, scriptrunner):
"""Asserts that no uncaught exceptions were thrown in the
scriptrunner's run thread.
diff --git a/lib/tests/streamlit/scriptrunner/test_data/st_cache_script.py b/lib/tests/streamlit/scriptrunner/test_data/st_cache_script.py
new file mode 100644
index 000000000000..7dce0be66b36
--- /dev/null
+++ b/lib/tests/streamlit/scriptrunner/test_data/st_cache_script.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018-2020 Streamlit Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A script for ScriptRunnerTest that uses st.cache"""
+
+import streamlit as st
+
+# Except for their names, these functions all intentionally have the same
+# bodies - so their ASTs should hash to the same values. However, they should
+# *not* share the same caches, because we include a function's qualified
+# name in its cache key. To test that this is true, the associated ScriptRunner
+# test should run the script twice:
+# - On the first run, "cached function called" should be produced 4 times
+# - On the second run, "cached function called" should not be produced
+
+
[email protected](suppress_st_warning=True)
+def cached1():
+ st.text("cached function called")
+ return "cached value"
+
+
[email protected](suppress_st_warning=True)
+def cached2():
+ st.text("cached function called")
+ return "cached value"
+
+
+def outer_func():
+ # These closures share the names and bodies of the functions in the outer
+ # scope, but they should have their own independent caches.
+ @st.cache(suppress_st_warning=True)
+ def cached1():
+ st.text("cached function called")
+ return "cached value"
+
+ @st.cache(suppress_st_warning=True)
+ def cached2():
+ st.text("cached function called")
+ return "cached value"
+
+ cached1()
+ cached2()
+
+
+cached1()
+cached2()
+outer_func()
| This fixes breakage that I introduced in #1152
Rather than creating the cache as a local variable in the st.cache wrapper, we instead manage all caches in a global `_MemCaches` class. Each cache is keyed off its wrapped function's fully qualified name, and its contents.
There's a new `ScriptRunner` test that makes sure that caches are reused across multiple runs of the same script. | https://api.github.com/repos/streamlit/streamlit/pulls/1208 | 2020-03-10T02:40:33Z | 2020-03-10T23:26:02Z | 2020-03-10T23:26:01Z | 2020-03-10T23:26:05Z | 3,257 | streamlit/streamlit | 21,771 |
Add wildcard example | diff --git a/certbot-dns-digitalocean/certbot_dns_digitalocean/__init__.py b/certbot-dns-digitalocean/certbot_dns_digitalocean/__init__.py
index 2cb7a92de05..8fd50919624 100644
--- a/certbot-dns-digitalocean/certbot_dns_digitalocean/__init__.py
+++ b/certbot-dns-digitalocean/certbot_dns_digitalocean/__init__.py
@@ -77,6 +77,15 @@
-d example.com \\
-d www.example.com
+.. code-block:: bash
+ :caption: To acquire a wildcard certificate for ``*.example.com``
+
+ certbot certonly \\
+ --dns-digitalocean \\
+ --dns-digitalocean-credentials ~/.secrets/certbot/digitalocean.ini \\
+ -d example.com \\
+ -d '*.example.com'
+
.. code-block:: bash
:caption: To acquire a certificate for ``example.com``, waiting 60 seconds
for DNS propagation
| ## Pull Request Checklist
- [X] If the change being made is to a [distributed component](https://certbot.eff.org/docs/contributing.html#code-components-and-layout), edit the `master` section of `certbot/CHANGELOG.md` to include a description of the change being made.
- [X] Add or update any documentation as needed to support the changes in this PR.
- [X] Include your name in `AUTHORS.md` if you like.
| https://api.github.com/repos/certbot/certbot/pulls/9164 | 2022-01-09T18:16:32Z | 2022-01-18T22:20:26Z | 2022-01-18T22:20:26Z | 2022-01-18T22:20:26Z | 246 | certbot/certbot | 161 |
Call error handler and mark flow on HTTPException | diff --git a/mitmproxy/proxy/protocol/http.py b/mitmproxy/proxy/protocol/http.py
index 16d04eeb1c..154646cd03 100644
--- a/mitmproxy/proxy/protocol/http.py
+++ b/mitmproxy/proxy/protocol/http.py
@@ -276,6 +276,8 @@ def _process_flow(self, f):
# We optimistically guess there might be an HTTP client on the
# other end
self.send_error_response(400, repr(e))
+ f.error = flow.Error(str(e))
+ self.channel.ask("error", f)
raise exceptions.ProtocolException(
"HTTP protocol error in client request: {}".format(e)
)
| Hi,
This allows scripts to handle HTTPException exceptions such as "HTTP Body too large" raised in [mitmproxy/net/http/http1/read.py:131](https://github.com/mitmproxy/mitmproxy/blob/master/mitmproxy/net/http/http1/read.py#L131).
This simply follows how ProtocolException exceptions are [handled](https://github.com/mitmproxy/mitmproxy/blob/master/mitmproxy/proxy/protocol/http.py#L212).
It also marks the flow with the error [as is done elsewhere](https://github.com/mitmproxy/mitmproxy/blob/master/mitmproxy/proxy/protocol/http.py#L211).
I would like to handle errors like this and I couldn't see any other way. If this commit makes sense to you and doesn't break anything I would like to get it merged in.
I'd be happy to add a test if someone could help suggest where it should go and maybe point to an existing test that might help me get started.
Cheers | https://api.github.com/repos/mitmproxy/mitmproxy/pulls/2082 | 2017-03-01T15:59:21Z | 2017-03-04T14:12:30Z | 2017-03-04T14:12:30Z | 2017-03-04T14:12:30Z | 157 | mitmproxy/mitmproxy | 27,920 |
⬆️ Upgrade GitHub Action label-approved | diff --git a/.github/workflows/label-approved.yml b/.github/workflows/label-approved.yml
index 2113c468ac835..1138e6043436d 100644
--- a/.github/workflows/label-approved.yml
+++ b/.github/workflows/label-approved.yml
@@ -13,6 +13,6 @@ jobs:
env:
GITHUB_CONTEXT: ${{ toJson(github) }}
run: echo "$GITHUB_CONTEXT"
- - uses: docker://tiangolo/label-approved:0.0.2
+ - uses: docker://tiangolo/label-approved:0.0.3
with:
token: ${{ secrets.FASTAPI_LABEL_APPROVED }}
| ⬆️ Upgrade GitHub Action label-approved | https://api.github.com/repos/tiangolo/fastapi/pulls/10905 | 2024-01-08T18:43:19Z | 2024-01-08T18:49:54Z | 2024-01-08T18:49:54Z | 2024-01-08T18:49:54Z | 157 | tiangolo/fastapi | 23,345 |
Update snakeLadder.py | diff --git a/BoardGame-CLI/snakeLadder.py b/BoardGame-CLI/snakeLadder.py
index 793cf98a2a..d8892ed433 100644
--- a/BoardGame-CLI/snakeLadder.py
+++ b/BoardGame-CLI/snakeLadder.py
@@ -70,7 +70,7 @@ def play():
looproll = roll()
temp1 += looproll
print(f"you got {looproll} ")
- if counter_6 = 3 :
+ if counter_6 == 3 :
temp1 -= 18
print("Three consectutives 6 got cancelled")
print("")
| Not the assignment operator | https://api.github.com/repos/geekcomputers/Python/pulls/1926 | 2023-07-16T16:49:48Z | 2023-07-20T16:17:11Z | 2023-07-20T16:17:10Z | 2023-07-20T16:41:49Z | 150 | geekcomputers/Python | 31,811 |
[Tiny]Remove duplicated assignment | diff --git a/java/runtime/src/main/java/io/ray/runtime/RayNativeRuntime.java b/java/runtime/src/main/java/io/ray/runtime/RayNativeRuntime.java
index f7018826702b8..1d8d60e1048f2 100644
--- a/java/runtime/src/main/java/io/ray/runtime/RayNativeRuntime.java
+++ b/java/runtime/src/main/java/io/ray/runtime/RayNativeRuntime.java
@@ -87,8 +87,6 @@ public void start() {
gcsClient = new GcsClient(rayConfig.getRedisAddress(), rayConfig.redisPassword);
}
- gcsClient = new GcsClient(rayConfig.getRedisAddress(), rayConfig.redisPassword);
-
if (rayConfig.workerMode == WorkerType.DRIVER) {
GcsNodeInfo nodeInfo = gcsClient.getNodeToConnectForDriver(rayConfig.nodeIp);
rayConfig.rayletSocketName = nodeInfo.getRayletSocketName();
| <!-- Thank you for your contribution! Please review https://github.com/ray-project/ray/blob/master/CONTRIBUTING.rst before opening a pull request. -->
<!-- Please add a reviewer to the assignee section when you create a PR. If you don't have the access to it, we will shortly find a reviewer and assign them to your PR. -->
## Why are these changes needed?
<!-- Please give a short summary of the change and the problem this solves. -->
## Related issue number
<!-- For example: "Closes #1234" -->
## Checks
- [x] I've run `scripts/format.sh` to lint the changes in this PR.
- [x] I've included any doc changes needed for https://docs.ray.io/en/master/.
- [x] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/
- Testing Strategy
- [x] Unit tests
- [ ] Release tests
- [ ] This PR is not tested :(
| https://api.github.com/repos/ray-project/ray/pulls/19866 | 2021-10-29T03:59:31Z | 2021-11-01T03:44:01Z | 2021-11-01T03:44:01Z | 2021-11-01T03:44:03Z | 205 | ray-project/ray | 19,219 |
Mention Python 3.6 support plan change | diff --git a/certbot/CHANGELOG.md b/certbot/CHANGELOG.md
index 73d2a5a7cba..0746c8e8e28 100644
--- a/certbot/CHANGELOG.md
+++ b/certbot/CHANGELOG.md
@@ -13,7 +13,9 @@ Certbot adheres to [Semantic Versioning](https://semver.org/).
### Changed
-*
+* We previously said we'd drop Python 3.6 support in this release. This release
+ still supports Python 3.6, however, support is still deprecated and we plan
+ to completely remove support in a future release.
### Fixed
| I have https://github.com/certbot/certbot/pull/9216, however, I see no real reason to rush things. I would like one of these two PRs to land before the release tomorrow. | https://api.github.com/repos/certbot/certbot/pulls/9219 | 2022-02-28T15:59:48Z | 2022-02-28T17:51:45Z | 2022-02-28T17:51:45Z | 2022-02-28T17:51:59Z | 153 | certbot/certbot | 136 |
Help clarify version number | diff --git a/certbot-apache/certbot_apache/tests/http_01_test.py b/certbot-apache/certbot_apache/tests/http_01_test.py
index f120674c7e3..dc1ca34d698 100644
--- a/certbot-apache/certbot_apache/tests/http_01_test.py
+++ b/certbot-apache/certbot_apache/tests/http_01_test.py
@@ -50,7 +50,7 @@ def test_empty_perform(self):
self.assertFalse(self.http.perform())
@mock.patch("certbot_apache.configurator.ApacheConfigurator.enable_mod")
- def test_enable_modules_22(self, mock_enmod):
+ def test_enable_modules_apache_2_2(self, mock_enmod):
self.config.version = (2, 2)
self.config.parser.modules.remove("authz_host_module")
self.config.parser.modules.remove("mod_authz_host.c")
@@ -59,7 +59,7 @@ def test_enable_modules_22(self, mock_enmod):
self.assertEqual(enmod_calls[0][0][0], "authz_host")
@mock.patch("certbot_apache.configurator.ApacheConfigurator.enable_mod")
- def test_enable_modules_24(self, mock_enmod):
+ def test_enable_modules_apache_2_4(self, mock_enmod):
self.config.parser.modules.remove("authz_core_module")
self.config.parser.modules.remove("mod_authz_core.c")
@@ -116,22 +116,22 @@ def test_no_vhost(self):
self.config.config.http01_port = 12345
self.assertRaises(errors.PluginError, self.http.perform)
- def test_perform_1_achall_22(self):
+ def test_perform_1_achall_apache_2_2(self):
self.combinations_perform_test(num_achalls=1, minor_version=2)
- def test_perform_1_achall_24(self):
+ def test_perform_1_achall_apache_2_4(self):
self.combinations_perform_test(num_achalls=1, minor_version=4)
- def test_perform_2_achall_22(self):
+ def test_perform_2_achall_apache_2_2(self):
self.combinations_perform_test(num_achalls=2, minor_version=2)
- def test_perform_2_achall_24(self):
+ def test_perform_2_achall_apache_2_4(self):
self.combinations_perform_test(num_achalls=2, minor_version=4)
- def test_perform_3_achall_22(self):
+ def test_perform_3_achall_apache_2_2(self):
self.combinations_perform_test(num_achalls=3, minor_version=2)
- def test_perform_3_achall_24(self):
+ def test_perform_3_achall_apache_2_4(self):
self.combinations_perform_test(num_achalls=3, minor_version=4)
def combinations_perform_test(self, num_achalls, minor_version):
| (Hopefully) helps make it clearer that that `22` and `24` corresponds to Apache 2.2 and 2.4. | https://api.github.com/repos/certbot/certbot/pulls/5865 | 2018-04-13T01:32:05Z | 2018-04-13T02:02:41Z | 2018-04-13T02:02:41Z | 2018-04-13T02:02:44Z | 705 | certbot/certbot | 3,496 |
Remove unicode in demo page names | diff --git "a/lib/streamlit/hello/pages/0_\360\237\223\271_Animation_Demo.py" b/lib/streamlit/hello/pages/0_Animation_Demo.py
similarity index 100%
rename from "lib/streamlit/hello/pages/0_\360\237\223\271_Animation_Demo.py"
rename to lib/streamlit/hello/pages/0_Animation_Demo.py
diff --git "a/lib/streamlit/hello/pages/1_\360\237\223\210_Plotting_Demo.py" b/lib/streamlit/hello/pages/1_Plotting_Demo.py
similarity index 100%
rename from "lib/streamlit/hello/pages/1_\360\237\223\210_Plotting_Demo.py"
rename to lib/streamlit/hello/pages/1_Plotting_Demo.py
diff --git "a/lib/streamlit/hello/pages/2_\360\237\214\215_Mapping_Demo.py" b/lib/streamlit/hello/pages/2_Mapping_Demo.py
similarity index 100%
rename from "lib/streamlit/hello/pages/2_\360\237\214\215_Mapping_Demo.py"
rename to lib/streamlit/hello/pages/2_Mapping_Demo.py
diff --git "a/lib/streamlit/hello/pages/3_\360\237\223\212_DataFrame_Demo.py" b/lib/streamlit/hello/pages/3_DataFrame_Demo.py
similarity index 100%
rename from "lib/streamlit/hello/pages/3_\360\237\223\212_DataFrame_Demo.py"
rename to lib/streamlit/hello/pages/3_DataFrame_Demo.py
| #5067 but without the merge conflict | https://api.github.com/repos/streamlit/streamlit/pulls/5123 | 2022-08-10T00:14:57Z | 2022-08-10T17:30:51Z | 2022-08-10T17:30:51Z | 2023-05-26T23:34:11Z | 361 | streamlit/streamlit | 22,134 |
Update google_utils.py | diff --git a/utils/google_utils.py b/utils/google_utils.py
index db36fa9d682..6a4660bad50 100644
--- a/utils/google_utils.py
+++ b/utils/google_utils.py
@@ -26,8 +26,12 @@ def attempt_download(file, repo='ultralytics/yolov5'):
assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
tag = response['tag_name'] # i.e. 'v1.0'
except: # fallback plan
- assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']
- tag = subprocess.check_output('git tag', shell=True).decode().split()[-1]
+ assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt',
+ 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
+ try:
+ tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
+ except:
+ tag = 'v5.0' # current release
name = file.name
if name in assets:
| Possible fix for #2894.
## 🛠️ PR Summary
<sub>Made with ❤️ by [Ultralytics Actions](https://github.com/ultralytics/actions)<sub>
### 🌟 Summary
Enhancements to backup download mechanism in `utils/google_utils.py` for model assets.
### 📊 Key Changes
- Expanded the list of fallback assets to include models with a '6' suffix (e.g., `yolov5s6.pt`, `yolov5m6.pt`, etc.).
- Improved error handling by trying to get the latest tag name and falling back to a default value (`v5.0`) if unsuccessful.
### 🎯 Purpose & Impact
- **Ensures model availability:** The expanded asset list increases the chances that a user will be able to download the right model, even if the primary method fails.
- **Stability in version control:** By capturing errors when retrieving the latest git tag name, the system provides a stable fallback option, ensuring the functionality isn't broken if the git command fails.
- **User experience:** Non-expert users will experience a smoother setup process since the script is more robust and less likely to fail when downloading required model weights. | https://api.github.com/repos/ultralytics/yolov5/pulls/2900 | 2021-04-22T18:18:12Z | 2021-04-22T18:27:33Z | 2021-04-22T18:27:33Z | 2024-01-19T18:41:25Z | 333 | ultralytics/yolov5 | 24,863 |
[ie/web.archive:vlive] Remove extractor | diff --git a/yt_dlp/extractor/_extractors.py b/yt_dlp/extractor/_extractors.py
index 4fed6d66a2e..bf0c67542e8 100644
--- a/yt_dlp/extractor/_extractors.py
+++ b/yt_dlp/extractor/_extractors.py
@@ -122,7 +122,6 @@
from .archiveorg import (
ArchiveOrgIE,
YoutubeWebArchiveIE,
- VLiveWebArchiveIE,
)
from .arcpublishing import ArcPublishingIE
from .arkena import ArkenaIE
diff --git a/yt_dlp/extractor/archiveorg.py b/yt_dlp/extractor/archiveorg.py
index 2541cd6fd8d..a0b26ac5a05 100644
--- a/yt_dlp/extractor/archiveorg.py
+++ b/yt_dlp/extractor/archiveorg.py
@@ -3,7 +3,6 @@
import urllib.parse
from .common import InfoExtractor
-from .naver import NaverBaseIE
from .youtube import YoutubeBaseInfoExtractor, YoutubeIE
from ..compat import compat_urllib_parse_unquote
from ..networking import HEADRequest
@@ -947,237 +946,3 @@ def _real_extract(self, url):
if not info.get('title'):
info['title'] = video_id
return info
-
-
-class VLiveWebArchiveIE(InfoExtractor):
- IE_NAME = 'web.archive:vlive'
- IE_DESC = 'web.archive.org saved vlive videos'
- _VALID_URL = r'''(?x)
- (?:https?://)?web\.archive\.org/
- (?:web/)?(?:(?P<date>[0-9]{14})?[0-9A-Za-z_*]*/)? # /web and the version index is optional
- (?:https?(?::|%3[Aa])//)?(?:
- (?:(?:www|m)\.)?vlive\.tv(?::(?:80|443))?/(?:video|embed)/(?P<id>[0-9]+) # VLive URL
- )
- '''
- _TESTS = [{
- 'url': 'https://web.archive.org/web/20221221144331/http://www.vlive.tv/video/1326',
- 'md5': 'cc7314812855ce56de70a06a27314983',
- 'info_dict': {
- 'id': '1326',
- 'ext': 'mp4',
- 'title': "Girl's Day's Broadcast",
- 'creator': "Girl's Day",
- 'view_count': int,
- 'uploader_id': 'muploader_a',
- 'uploader_url': None,
- 'uploader': None,
- 'upload_date': '20150817',
- 'thumbnail': r're:^https?://.*\.(?:jpg|png)$',
- 'timestamp': 1439816449,
- 'like_count': int,
- 'channel': 'Girl\'s Day',
- 'channel_id': 'FDF27',
- 'comment_count': int,
- 'release_timestamp': 1439818140,
- 'release_date': '20150817',
- 'duration': 1014,
- },
- 'params': {
- 'skip_download': True,
- },
- }, {
- 'url': 'https://web.archive.org/web/20221221182103/http://www.vlive.tv/video/16937',
- 'info_dict': {
- 'id': '16937',
- 'ext': 'mp4',
- 'title': '첸백시 걍방',
- 'creator': 'EXO',
- 'view_count': int,
- 'subtitles': 'mincount:12',
- 'uploader_id': 'muploader_j',
- 'uploader_url': 'http://vlive.tv',
- 'uploader': None,
- 'upload_date': '20161112',
- 'thumbnail': r're:^https?://.*\.(?:jpg|png)$',
- 'timestamp': 1478923074,
- 'like_count': int,
- 'channel': 'EXO',
- 'channel_id': 'F94BD',
- 'comment_count': int,
- 'release_timestamp': 1478924280,
- 'release_date': '20161112',
- 'duration': 906,
- },
- 'params': {
- 'skip_download': True,
- },
- }, {
- 'url': 'https://web.archive.org/web/20221127190050/http://www.vlive.tv/video/101870',
- 'info_dict': {
- 'id': '101870',
- 'ext': 'mp4',
- 'title': '[ⓓ xV] “레벨이들 매력에 반해? 안 반해?” 움직이는 HD 포토 (레드벨벳:Red Velvet)',
- 'creator': 'Dispatch',
- 'view_count': int,
- 'subtitles': 'mincount:6',
- 'uploader_id': 'V__FRA08071',
- 'uploader_url': 'http://vlive.tv',
- 'uploader': None,
- 'upload_date': '20181130',
- 'thumbnail': r're:^https?://.*\.(?:jpg|png)$',
- 'timestamp': 1543601327,
- 'like_count': int,
- 'channel': 'Dispatch',
- 'channel_id': 'C796F3',
- 'comment_count': int,
- 'release_timestamp': 1543601040,
- 'release_date': '20181130',
- 'duration': 279,
- },
- 'params': {
- 'skip_download': True,
- },
- }]
-
- # The wayback machine has special timestamp and "mode" values:
- # timestamp:
- # 1 = the first capture
- # 2 = the last capture
- # mode:
- # id_ = Identity - perform no alterations of the original resource, return it as it was archived.
- _WAYBACK_BASE_URL = 'https://web.archive.org/web/2id_/'
-
- def _download_archived_page(self, url, video_id, *, timestamp='2', **kwargs):
- for retry in self.RetryManager():
- try:
- return self._download_webpage(f'https://web.archive.org/web/{timestamp}id_/{url}', video_id, **kwargs)
- except ExtractorError as e:
- if isinstance(e.cause, HTTPError) and e.cause.status == 404:
- raise ExtractorError('Page was not archived', expected=True)
- retry.error = e
- continue
-
- def _download_archived_json(self, url, video_id, **kwargs):
- page = self._download_archived_page(url, video_id, **kwargs)
- if not page:
- raise ExtractorError('Page was not archived', expected=True)
- else:
- return self._parse_json(page, video_id)
-
- def _extract_formats_from_m3u8(self, m3u8_url, params, video_id):
- m3u8_doc = self._download_archived_page(m3u8_url, video_id, note='Downloading m3u8', query=params, fatal=False)
- if not m3u8_doc:
- return
-
- # M3U8 document should be changed to archive domain
- m3u8_doc = m3u8_doc.splitlines()
- url_base = m3u8_url.rsplit('/', 1)[0]
- first_segment = None
- for i, line in enumerate(m3u8_doc):
- if not line.startswith('#'):
- m3u8_doc[i] = f'{self._WAYBACK_BASE_URL}{url_base}/{line}?{urllib.parse.urlencode(params)}'
- first_segment = first_segment or m3u8_doc[i]
-
- # Segments may not have been archived. See https://web.archive.org/web/20221127190050/http://www.vlive.tv/video/101870
- urlh = self._request_webpage(HEADRequest(first_segment), video_id, errnote=False,
- fatal=False, note='Check first segment availablity')
- if urlh:
- formats, subtitles = self._parse_m3u8_formats_and_subtitles('\n'.join(m3u8_doc), ext='mp4', video_id=video_id)
- if subtitles:
- self._report_ignoring_subs('m3u8')
- return formats
-
- # Closely follows the logic of the ArchiveTeam grab script
- # See: https://github.com/ArchiveTeam/vlive-grab/blob/master/vlive.lua
- def _real_extract(self, url):
- video_id, url_date = self._match_valid_url(url).group('id', 'date')
-
- webpage = self._download_archived_page(f'https://www.vlive.tv/video/{video_id}', video_id, timestamp=url_date)
-
- player_info = self._search_json(r'__PRELOADED_STATE__\s*=', webpage, 'player info', video_id)
- user_country = traverse_obj(player_info, ('common', 'userCountry'))
-
- main_script_url = self._search_regex(r'<script\s+src="([^"]+/js/main\.[^"]+\.js)"', webpage, 'main script url')
- main_script = self._download_archived_page(main_script_url, video_id, note='Downloading main script')
- app_id = self._search_regex(r'appId\s*=\s*"([^"]+)"', main_script, 'app id')
-
- inkey = self._download_archived_json(
- f'https://www.vlive.tv/globalv-web/vam-web/video/v1.0/vod/{video_id}/inkey', video_id, note='Fetching inkey', query={
- 'appId': app_id,
- 'platformType': 'PC',
- 'gcc': user_country,
- 'locale': 'en_US',
- }, fatal=False)
-
- vod_id = traverse_obj(player_info, ('postDetail', 'post', 'officialVideo', 'vodId'))
-
- vod_data = self._download_archived_json(
- f'https://apis.naver.com/rmcnmv/rmcnmv/vod/play/v2.0/{vod_id}', video_id, note='Fetching vod data', query={
- 'key': inkey.get('inkey'),
- 'pid': 'rmcPlayer_16692457559726800', # partially unix time and partially random. Fixed value used by archiveteam project
- 'sid': '2024',
- 'ver': '2.0',
- 'devt': 'html5_pc',
- 'doct': 'json',
- 'ptc': 'https',
- 'sptc': 'https',
- 'cpt': 'vtt',
- 'ctls': '%7B%22visible%22%3A%7B%22fullscreen%22%3Atrue%2C%22logo%22%3Afalse%2C%22playbackRate%22%3Afalse%2C%22scrap%22%3Afalse%2C%22playCount%22%3Atrue%2C%22commentCount%22%3Atrue%2C%22title%22%3Atrue%2C%22writer%22%3Atrue%2C%22expand%22%3Afalse%2C%22subtitles%22%3Atrue%2C%22thumbnails%22%3Atrue%2C%22quality%22%3Atrue%2C%22setting%22%3Atrue%2C%22script%22%3Afalse%2C%22logoDimmed%22%3Atrue%2C%22badge%22%3Atrue%2C%22seekingTime%22%3Atrue%2C%22muted%22%3Atrue%2C%22muteButton%22%3Afalse%2C%22viewerNotice%22%3Afalse%2C%22linkCount%22%3Afalse%2C%22createTime%22%3Afalse%2C%22thumbnail%22%3Atrue%7D%2C%22clicked%22%3A%7B%22expand%22%3Afalse%2C%22subtitles%22%3Afalse%7D%7D',
- 'pv': '4.26.9',
- 'dr': '1920x1080',
- 'cpl': 'en_US',
- 'lc': 'en_US',
- 'adi': '%5B%7B%22type%22%3A%22pre%22%2C%22exposure%22%3Afalse%2C%22replayExposure%22%3Afalse%7D%5D',
- 'adu': '%2F',
- 'videoId': vod_id,
- 'cc': user_country,
- })
-
- formats = []
-
- streams = traverse_obj(vod_data, ('streams', ...))
- if len(streams) > 1:
- self.report_warning('Multiple streams found. Only the first stream will be downloaded.')
- stream = streams[0]
-
- max_stream = max(
- stream.get('videos') or [],
- key=lambda v: traverse_obj(v, ('bitrate', 'video'), default=0), default=None)
- if max_stream is not None:
- params = {arg.get('name'): arg.get('value') for arg in stream.get('keys', []) if arg.get('type') == 'param'}
- formats = self._extract_formats_from_m3u8(max_stream.get('source'), params, video_id) or []
-
- # For parts of the project MP4 files were archived
- max_video = max(
- traverse_obj(vod_data, ('videos', 'list', ...)),
- key=lambda v: traverse_obj(v, ('bitrate', 'video'), default=0), default=None)
- if max_video is not None:
- video_url = self._WAYBACK_BASE_URL + max_video.get('source')
- urlh = self._request_webpage(HEADRequest(video_url), video_id, errnote=False,
- fatal=False, note='Check video availablity')
- if urlh:
- formats.append({'url': video_url})
-
- return {
- 'id': video_id,
- 'formats': formats,
- **traverse_obj(player_info, ('postDetail', 'post', {
- 'title': ('officialVideo', 'title', {str}),
- 'creator': ('author', 'nickname', {str}),
- 'channel': ('channel', 'channelName', {str}),
- 'channel_id': ('channel', 'channelCode', {str}),
- 'duration': ('officialVideo', 'playTime', {int_or_none}),
- 'view_count': ('officialVideo', 'playCount', {int_or_none}),
- 'like_count': ('officialVideo', 'likeCount', {int_or_none}),
- 'comment_count': ('officialVideo', 'commentCount', {int_or_none}),
- 'timestamp': ('officialVideo', 'createdAt', {lambda x: int_or_none(x, scale=1000)}),
- 'release_timestamp': ('officialVideo', 'willStartAt', {lambda x: int_or_none(x, scale=1000)}),
- })),
- **traverse_obj(vod_data, ('meta', {
- 'uploader_id': ('user', 'id', {str}),
- 'uploader': ('user', 'name', {str}),
- 'uploader_url': ('user', 'url', {url_or_none}),
- 'thumbnail': ('cover', 'source', {url_or_none}),
- }), expected_type=lambda x: x or None),
- **NaverBaseIE.process_subtitles(vod_data, lambda x: [self._WAYBACK_BASE_URL + x]),
- }
diff --git a/yt_dlp/extractor/naver.py b/yt_dlp/extractor/naver.py
index d79caf5f3d1..2d8459b02bb 100644
--- a/yt_dlp/extractor/naver.py
+++ b/yt_dlp/extractor/naver.py
@@ -21,7 +21,7 @@
class NaverBaseIE(InfoExtractor):
_CAPTION_EXT_RE = r'\.(?:ttml|vtt)'
- @staticmethod # NB: Used in VLiveWebArchiveIE, WeverseIE
+ @staticmethod # NB: Used in WeverseIE
def process_subtitles(vod_data, process_url):
ret = {'subtitles': {}, 'automatic_captions': {}}
for caption in traverse_obj(vod_data, ('captions', 'list', ...)):
| Closes #8122
<details open><summary>Template</summary> <!-- OPEN is intentional -->
<!--
# PLEASE FOLLOW THE GUIDE BELOW
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x])
- Use *Preview* tab to see how your *pull request* will actually look like
-->
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Fix or improvement to an extractor (Make sure to add/update tests)
- [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
<!-- Do NOT edit/remove anything below this! -->
</details><details><summary>Copilot Summary</summary>
<!--
copilot:all
-->
### <samp>🤖 Generated by Copilot at 6b14fb2</samp>
### Summary
:truck::memo::scissors:
<!--
1. :truck: This emoji represents the action of moving or relocating something, such as a file or a class. It can be used to indicate that the `VLiveWebArchiveIE` extractor class was moved from `_extractors.py` to `archiveorg.py`.
2. :memo: This emoji represents the action of writing or updating a document, such as a comment or a note. It can be used to indicate that the comment of the `process_subtitles` method was updated to reflect the change in the extractor classes that use it.
3. :scissors: This emoji represents the action of cutting or removing something, such as an import or a line of code. It can be used to indicate that the import of `NaverBaseIE` was removed from `archiveorg.py` since it was not used by the `VLiveWebArchiveIE` extractor class.
-->
This pull request refactors the codebase to split the extractors into different files based on their domains or categories. It moves the `VLiveWebArchiveIE` extractor class from `_extractors.py` to `archiveorg.py` and updates the comment of the `process_subtitles` method in `naver.py` accordingly. This improves the organization and readability of the code.
> _Oh, we're the brave extractors of the web_
> _We fetch the videos from every site and thread_
> _But when the code gets messy and too long_
> _We move the classes to where they belong_
### Walkthrough
* Relocate `VLiveWebArchiveIE` extractor class from `_extractors.py` to `archiveorg.py` to group it with other web.archive.org extractors ([link](https://github.com/yt-dlp/yt-dlp/pull/8132/files?diff=unified&w=0#diff-780b22dc7eb280f5a7b2bbf79aff17826de88ddcbf2fc1116ba19901827aa4e3L125), [link](https://github.com/yt-dlp/yt-dlp/pull/8132/files?diff=unified&w=0#diff-965c9f468a995b8a1d436ca590ad139272ee15e069b8f8528c513eecf3da5b60L950-L1183))
* Remove unused import of `NaverBaseIE` from `archiveorg.py` to clean up the code ([link](https://github.com/yt-dlp/yt-dlp/pull/8132/files?diff=unified&w=0#diff-965c9f468a995b8a1d436ca590ad139272ee15e069b8f8528c513eecf3da5b60L6))
* Update comment of `process_subtitles` method in `naver.py` to remove reference to `VLiveWebArchiveIE` ([link](https://github.com/yt-dlp/yt-dlp/pull/8132/files?diff=unified&w=0#diff-a8b2ae4bbf6996f7f9a3654d689ed368eca8c96895c458ca7a489680ff659d83L24-R24))
</details>
| https://api.github.com/repos/yt-dlp/yt-dlp/pulls/8132 | 2023-09-16T23:35:46Z | 2023-09-17T00:38:09Z | 2023-09-17T00:38:09Z | 2023-12-07T15:16:34Z | 3,805 | yt-dlp/yt-dlp | 8,108 |
Add safaribooksonline extractor | diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index d73826d44bf..3a0c42deda3 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -420,6 +420,10 @@
)
from .rutv import RUTVIE
from .sandia import SandiaIE
+from .safari import (
+ SafariIE,
+ SafariCourseIE,
+)
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
diff --git a/youtube_dl/extractor/safari.py b/youtube_dl/extractor/safari.py
new file mode 100644
index 00000000000..3e494b9605c
--- /dev/null
+++ b/youtube_dl/extractor/safari.py
@@ -0,0 +1,144 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from .brightcove import BrightcoveIE
+
+from ..compat import (
+ compat_urllib_parse,
+ compat_urllib_request,
+)
+from ..utils import (
+ ExtractorError,
+ smuggle_url,
+ std_headers,
+)
+
+
+class SafariBaseIE(InfoExtractor):
+ _LOGIN_URL = 'https://www.safaribooksonline.com/accounts/login/'
+ _SUCCESSFUL_LOGIN_REGEX = r'<a href="/accounts/logout/"[^>]+>Sign Out</a>'
+ _ACCOUNT_CREDENTIALS_HINT = ('Use --username and --password options to '
+ 'supply credentials for safaribooksonline.com ')
+ _NETRC_MACHINE = 'safaribooksonline'
+
+ LOGGED_IN = False
+
+ def _real_initialize(self):
+ # We only need to log in once for courses or individual videos
+ if not SafariBaseIE.LOGGED_IN:
+ self._login()
+ SafariBaseIE.LOGGED_IN = True
+
+ def _login(self):
+ (username, password) = self._get_login_info()
+ if username is None:
+ raise ExtractorError(
+ self._ACCOUNT_CREDENTIALS_HINT,
+ expected=True)
+
+ headers = std_headers
+ if 'Referer' not in headers:
+ headers['Referer'] = self._LOGIN_URL
+
+ login_page = self._download_webpage(
+ self._LOGIN_URL, None,
+ 'Downloading login form')
+
+ csrf = self._html_search_regex(
+ r"<input +type='hidden' +name='csrfmiddlewaretoken' +value='([^']+)' +/>",
+ login_page, 'csrf token')
+
+ login_form = {
+ 'csrfmiddlewaretoken': csrf,
+ 'email': username,
+ 'password1': password,
+ 'login': 'Sign In',
+ 'next': '',
+ }
+
+ request = compat_urllib_request.Request(
+ self._LOGIN_URL, compat_urllib_parse.urlencode(login_form), headers=headers)
+ login_page = self._download_webpage(
+ request, None, 'Logging in as %s' % username)
+
+ if re.search(self._SUCCESSFUL_LOGIN_REGEX, login_page) is None:
+ raise ExtractorError('Login failed; make sure your credentials are correct and '
+ 'try again.', expected=True)
+
+ self.to_screen('Login successful')
+
+
+class SafariIE(SafariBaseIE):
+ IE_NAME = 'safari'
+ IE_DESC = 'safaribooksonline.com online video'
+ _VALID_URL = (r'https?://(?:www\.)?safaribooksonline\.com/library/view/[^/]+/'
+ '(?P<id>\d+)/(?P<part>part\d+)\.html')
+ _TEST = {
+ 'url': ('https://www.safaribooksonline.com/library/view/'
+ 'hadoop-fundamentals-livelessons/9780133392838/part00.html'),
+ 'md5': '5b0c4cc1b3c1ba15dda7344085aa5592',
+ 'info_dict': {
+ 'id': '9780133392838',
+ 'ext': 'mp4',
+ 'title': 'Introduction',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ part = mobj.group('part')
+
+ webpage = self._download_webpage(url, part)
+ bc_url = BrightcoveIE._extract_brightcove_url(webpage)
+ if not bc_url:
+ raise ExtractorError('Could not extract Brightcove URL from %s' % url, expected=True)
+
+ return {
+ '_type': 'url',
+ 'url': smuggle_url(bc_url, {'Referer': url}),
+ 'ie_key': 'Brightcove'
+ }
+
+
+class SafariCourseIE(SafariBaseIE):
+ IE_NAME = 'safari:course'
+ IE_DESC = 'safaribooksonline.com online courses'
+
+ _VALID_URL = (r'https?://(?:www\.)?safaribooksonline\.com/library/view/'
+ '(?P<course_path>[^/]+)/(?P<id>\d+)/?$')
+
+ _API_BASE = 'https://www.safaribooksonline.com/api/v1/book'
+ _API_FORMAT = 'json'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ course_path = mobj.group('course_path')
+ course_id = mobj.group('id')
+
+ webpage = self._download_webpage(
+ '%s/%s/?override_format=%s' % (self._API_BASE, course_id, self._API_FORMAT),
+ course_path, 'Downloading course JSON')
+
+ course_json = json.loads(webpage)
+
+ if 'chapters' not in course_json:
+ raise ExtractorError('No chapters found for course %s' % course_id, expected=True)
+
+ num_parts = len(course_json['chapters'])
+ parts = ['%02d' % part for part in range(num_parts)]
+
+ entries = [
+ self.url_result(
+ 'https://www.safaribooksonline.com/library/view/%s/%s/part%s.html' % (course_path,
+ course_id,
+ part_id),
+ 'Safari')
+ for part_id in parts]
+
+ course_title = course_json['title']
+
+ return self.playlist_result(entries, course_id, course_title)
| Following on from https://github.com/rg3/youtube-dl/issues/5253, I decided to give implementing this a go myself.
| https://api.github.com/repos/ytdl-org/youtube-dl/pulls/5262 | 2015-03-22T18:06:53Z | 2015-03-26T17:58:19Z | 2015-03-26T17:58:19Z | 2015-11-12T17:50:13Z | 1,544 | ytdl-org/youtube-dl | 50,598 |